file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.rs | extern crate clap;
use clap::{App, Arg};
use std::io::{stderr, stdin, stdout, Write, Read};
use std::time::{Duration, Instant};
use std::net::{SocketAddr, TcpListener, IpAddr};
const DEFAULT_BUFFER_SIZE: usize = 4096;
const DEFAULT_ITERATION_COUNT: usize = 1;
const DEFAULT_ADDRESS: &'static str = "127.0.0.1";
macro_rules! print_err_into {
($err_write: expr, $fmt:expr) => ({
use std::io::Write;
if let Err(e) = writeln!($err_write, $fmt) {
panic!("Error while writing to stderr: {}", e);
}
});
($err_write: expr, $fmt:expr, $($arg:tt)*) => ({
use std::io::Write;
if let Err(e) = writeln!($err_write, $fmt, $($arg)*) {
panic!("Error while writing to stderr: {}", e);
}
});
}
macro_rules! print_err {
($fmt:expr) => ({
use std::io::{stderr, Write};
if let Err(e) = writeln!(stderr(), $fmt) {
panic!("Error while writing to stderr: {}", e);
}
});
($fmt:expr, $($arg:tt)*) => ({
use std::io::{stderr, Write};
if let Err(e) = writeln!(stderr(), $fmt, $($arg)*) {
panic!("Error while writing to stderr: {}", e);
}
});
}
#[derive(Default)]
struct TransferInfo {
/// The total number of bytes transferred.
total_bytes_transferred: usize,
/// The number of times the Bytes Per Second has been measured.
total_measures: usize,
/// Accumulation of all of the Bytes Per Second measures.
total_bps: f64,
/// The Bytes Per Second during the last measure.
last_bps: f64,
/// The number of bytes transferred during the last measure.
last_bytes_transferred: usize,
}
#[inline]
fn exit_err() -> ! {
std::process::exit(1);
}
fn main() {
let matches = App::new("Throughput")
.version("1.1")
.author("Adolph C.")
.about("Measures the throughput of stdin or a socket.")
.arg(Arg::with_name("address")
.short("l")
.long("addr")
.value_name("IP Address")
.help("IP address to listen to. Defaults to 127.0.0.1. Must specify port.")
.takes_value(true))
.arg(Arg::with_name("buffer_size")
.short("b")
.long("bufsize")
.value_name("BYTES")
.help("The size of the buffer used to read from the stream in bytes. Defaults to 4096.")
.takes_value(true))
.arg(Arg::with_name("iterations")
.short("i")
.long("iterations")
.help("The number of times the buffer should be filled before a measure is taken. Defaults to 1.")
.takes_value(true))
.arg(Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT_NUMBER")
.help("Port to listen on. Must be specified if address is given.")
.takes_value(true))
.arg(Arg::with_name("pass")
.long("pass")
.help("If present, throughput will print to stderr and pass input to stdout.")
.takes_value(false))
.after_help("If a port/address is not specified, throughput will read from stdin.")
.get_matches();
let passthrough = matches.is_present("pass");
let buffer_size: usize;
let iterations: usize;
if let Some(buf_size_str) = matches.value_of("buffer_size") {
if let Ok(bsize) = buf_size_str.parse() {
buffer_size = bsize;
} else {
print_err!("Buffer size must be a valid number.");
exit_err();
}
} else {
buffer_size = DEFAULT_BUFFER_SIZE;
}
if let Some(iterations_str) = matches.value_of("iterations") {
if let Ok(it) = iterations_str.parse() {
iterations = it;
} else {
print_err!("Iterations must be a valid number.");
exit_err();
}
} else {
iterations = DEFAULT_ITERATION_COUNT;
}
let address_present = matches.is_present("address");
let port_present = matches.is_present("port");
if address_present || port_present {
if !port_present {
print_err!("A port must be speicified alongside a address.");
exit_err();
} else {
let address = matches.value_of("address").unwrap_or(DEFAULT_ADDRESS);
let port = matches.value_of("port").expect("Expected port arg to have value.");
if let Ok(parsed_port) = port.parse() {
measure_tcp_stream(address, parsed_port, buffer_size, iterations, passthrough);
} else {
print_err!("Port must be a valid number from 0 to 65535");
exit_err();
}
}
} else {
measure_stdin(buffer_size, iterations, passthrough);
}
}
fn measure_tcp_stream(address: &str, port: u16, buffer_size: usize, iterations: usize, passthrough: bool) {
let parsed_addr: IpAddr = match address.parse() {
Ok(parsed) => parsed,
Err(_) => {
print_err!("Bad IP address {}", address);
exit_err();
}
};
let socket_addr = SocketAddr::new(parsed_addr, port);
match TcpListener::bind(socket_addr) {
Ok(listener) => {
println!("Listening at {}", socket_addr);
match listener.accept() {
Ok((stream, incoming_addr)) => {
println!("Reading incoming data from {}", incoming_addr);
println!();
measure_reader(stream, buffer_size, iterations, passthrough);
},
Err(err) => {
print_err!("There was an error accepting a connection.");
print_err!("ERROR: {}", err);
exit_err();
}
}
},
Err(err) => {
print_err!("There was an error connecting to {}", socket_addr);
print_err!("ERROR: {}", err);
exit_err();
}
};
}
fn measure_stdin(buffer_size: usize, iterations: usize, passthrough: bool) {
let input = stdin();
measure_reader(input.lock(), buffer_size, iterations, passthrough);
}
fn measure_reader<R: Read>(mut reader: R, buffer_size: usize, iterations: usize, passthrough: bool) {
let output = stdout();
let mut locked_output = output.lock();
let err_out = stderr();
let mut locked_error = err_out.lock();
let mut buffer = Vec::with_capacity(buffer_size);
buffer.resize(buffer_size, 0);
let mut last_measured = Instant::now();
let mut transfer_info = TransferInfo::default();
loop {
let mut end_loop = false;
for _ in 0..iterations {
match reader.read(&mut buffer) {
Ok(bytes_read) => {
transfer_info.last_bytes_transferred += bytes_read;
transfer_info.total_bytes_transferred += bytes_read;
if bytes_read == 0 {
end_loop = true;
break;
} else if passthrough {
if let Err(err) = locked_output.write_all(&buffer[0..bytes_read]) {
print_err_into!(locked_error, "Error while writing buffer into stdout: {}", err);
exit_err();
}
}
}
Err(err) => {
print_err_into!(locked_error, "Error while reading into buffer: {}", err);
}
}
}
let measure_end = Instant::now();
let duration = measure_end.duration_since(last_measured);
if duration.as_secs() > 0 || end_loop {
transfer_info.last_bps = bytes_per_second(transfer_info.last_bytes_transferred, duration);
transfer_info.total_measures += 1;
transfer_info.total_bps += transfer_info.last_bps;
let _print_result = if passthrough {
print_info(&mut locked_error, &mut transfer_info)
} else {
print_info(&mut locked_output, &mut transfer_info)
};
match _print_result {
Ok(_) => {},
Err(err) => {
print_err_into!(locked_error, "Error while printing output: {}", err);
exit_err();
}
}
last_measured = measure_end;
transfer_info.last_bps = 0.0;
transfer_info.last_bytes_transferred = 0;
}
if end_loop { return; }
}
}
fn print_info<W: Write>(output: &mut W, transfer_info: &mut TransferInfo) -> Result<(), std::io::Error> {
if transfer_info.total_measures > 1 { term_move_up(output, 3)?; }
let (mem_total_transfer, unit_total_transfer) = byte_to_mem_units(transfer_info.total_bytes_transferred as f64);
print_fixed_width(output, "Data Transferred:", 24);
write!(output, "{:.3} {} ({} cycles)",
mem_total_transfer, unit_total_transfer, transfer_info.total_measures)?;
term_clear_line(output)?;
let (mem_single, unit_single) = byte_to_mem_units(transfer_info.last_bps);
print_fixed_width(output, "Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_single, unit_single)?;
term_clear_line(output)?;
let avg_bps = transfer_info.total_bps / transfer_info.total_measures as f64;
let (mem_avg, unit_avg) = byte_to_mem_units(avg_bps);
print_fixed_width(output, "Average Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_avg, unit_avg)?;
term_clear_line(output)?;
Ok(())
}
fn print_fixed_width<W: Write>(output: &mut W, text: &str, columns: usize) {
if let Err(err) = output.write(text.as_bytes()) {
panic!("[print_fixed_width] Error while writing to stream: {}", err);
}
if text.len() < columns {
let remaining = columns - text.len();
let pad = [b' '];
for _ in 0..remaining {
if let Err(err) = output.write(&pad) {
panic!("[print_fixed_width] Error while padding output: {}", err);
}
}
}
}
/// Clears to the end of the current line.
#[inline]
fn term_clear_line<W: Write>(output: &mut W) -> Result<(), std::io::Error> {
writeln!(output, "\x1b[K")?;
Ok(())
}
/// Moves the cursor up one line.
#[inline]
fn term_move_up<W: Write>(output: &mut W, lines: usize) -> Result<(), std::io::Error> {
write!(output, "\x1b[{}A", lines)?;
Ok(())
}
fn | (bytes: f64) -> (f64, &'static str) {
const KB: f64 = 1024.0;
const MB: f64 = KB * 1024.0;
const GB: f64 = MB * 1024.0;
const TB: f64 = GB * 1024.0;
if bytes >= TB { (bytes / TB, "TB") }
else if bytes >= GB { (bytes / GB, "GB") }
else if bytes >= MB { (bytes / MB, "MB") }
else if bytes >= KB { (bytes / KB, "KB") }
else { (bytes, "Bytes") }
}
fn bytes_per_second(bytes_read: usize, duration: Duration) -> f64 {
let duration_seconds =
duration.as_secs() as f64 +
duration.subsec_nanos() as f64 / 1000000000.0;
return bytes_read as f64 / duration_seconds;
} | byte_to_mem_units | identifier_name |
zplsc_c_echogram.py | """
@package mi.dataset.driver.zplsc_c
@file mi/dataset/driver/zplsc_c/zplsc_c_echogram.py
@author Craig Risien/Rene Gelinas
@brief ZPLSC Echogram generation for the ooicore
Release notes:
This class supports the generation of ZPLSC-C echograms. It needs matplotlib version 1.3.1 for the code to display
the colorbar at the bottom of the figure. If matplotlib version 1.1.1 is used, the colorbar is plotted over the
figure instead of at the bottom of it.
"""
from datetime import datetime
import numpy as np
import matplotlib
from matplotlib.dates import date2num, num2date
from modest_image import imshow
import mi.dataset.driver.zplsc_c.zplsc_functions as zf
matplotlib.use("Agg")
import matplotlib.pyplot as plt
__author__ = 'Craig Risien, Rene Gelinas'
REF_TIME = date2num(datetime(1900, 1, 1, 0, 0, 0))
class ZplscCParameters(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
# Configuration Parameters
Salinity = 32 # Salinity in psu
Pressure = 150 # in dbars (~ depth of instrument in meters).
Bins2Avg = 1 # number of range bins to average - 1 is no averaging
class ZplscCCalibrationCoefficients(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
ka = 464.3636
kb = 3000.0
kc = 1.893
A = 0.001466
B = 0.0002388
C = 0.000000100335
TVR = []
VTX = []
BP = []
EL = []
DS = []
# Freq 38kHz
TVR.append(1.691999969482e2)
VTX.append(1.533999938965e2)
BP.append(8.609999902546e-3)
EL.append(1.623000030518e2)
DS.append(2.280000038445e-2)
# Freq 125kHz
TVR.append(1.668999938965e2)
VTX.append(5.8e+01)
BP.append(1.530999969691e-2)
EL.append(1.376999969482e2)
DS.append(2.280000038445e-2)
# Freq 200kHz
TVR.append(1.688999938965e2)
VTX.append(9.619999694824e1)
BP.append(1.530999969691e-2)
EL.append(1.456000061035e2)
DS.append(2.250000089407e-2)
# Freq 455kHz
TVR.append(1.696000061035e2)
VTX.append(1.301000061035e2)
BP.append(8.609999902546e-3)
EL.append(1.491999969482e2)
DS.append(2.300000004470e-2)
class ZPLSCCPlot(object):
font_size_small = 14
font_size_large = 18
num_xticks = 25
num_yticks = 7
interplot_spacing = 0.1
lower_percentile = 5
upper_percentile = 95
def __init__(self, data_times, channel_data_dict, frequency_dict, depth_range, _min_db=None, _max_db=None):
self.fig = None
self.power_data_dict = self._transpose_and_flip(channel_data_dict)
if (_min_db is None) or (_max_db is None):
self.min_db, self.max_db = self._get_power_range(channel_data_dict)
else:
self.min_db = _min_db
self.max_db = _max_db
self.frequency_dict = frequency_dict
# convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
max_depth, _ = self.power_data_dict[1].shape
self._setup_plot(depth_range, max_depth)
def generate_plots(self):
"""
Generate plots for all channels in data set
"""
freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
data_axes = None
for index, frequency in enumerate(sorted(freq_to_channel)):
channel = freq_to_channel[frequency]
td_f = self.frequency_dict[channel]
title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
self.min_db, self.max_db)
if data_axes:
self._display_x_labels(self.ax[2], self.data_times)
self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def | (power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom)
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length)
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb):
"""
Compute the backscatter volumes values for one zplsc_c profile data record.
This code was borrowed from ASL MatLab code that reads in zplsc-c raw data
and performs calculations in order to compute the backscatter volume in db.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:param chan_data: Raw frequency data from the zplsc-c instrument.
:param sound_speed: Speed of sound at based on speed of sound, pressure and salinity.
:param depth_range: Range of the depth of the measurements
:param sea_absorb: Seawater absorption coefficient for each frequency
:return: sv: Volume backscatter in db
"""
_N = []
if self.params.Bins2Avg > 1:
for chan in range(profile_hdr.num_channels):
el = self.cc.EL[chan] - 2.5/self.cc.DS[chan] + np.array(chan_data[chan])/(26214*self.cc.DS[chan])
power = 10**(el/10)
# Perform bin averaging
num_bins = len(chan_data[chan])/self.params.Bins2Avg
pwr_avg = []
for _bin in range(num_bins):
pwr_avg.append(np.mean(power[_bin*self.params.Bins2Avg:(_bin+1)*self.params.Bins2Avg]))
el_avg = 10*np.log10(pwr_avg)
_N.append(np.round(26214*self.cc.DS[chan]*(el_avg - self.cc.EL[chan] + 2.5/self.cc.DS[chan])))
else:
for chan in range(profile_hdr.num_channels):
_N.append(np.array(chan_data[chan]))
sv = []
for chan in range(profile_hdr.num_channels):
# Calculate correction to Sv due to non square transmit pulse
sv_offset = zf.compute_sv_offset(profile_hdr.frequency[chan], profile_hdr.pulse_length[chan])
sv.append(self.cc.EL[chan]-2.5/self.cc.DS[chan] + _N[chan]/(26214*self.cc.DS[chan]) - self.cc.TVR[chan] -
20*np.log10(self.cc.VTX[chan]) + 20*np.log10(depth_range[chan]) +
2*sea_absorb[chan]*depth_range[chan] -
10*np.log10(0.5*sound_speed*profile_hdr.pulse_length[chan]/1e6*self.cc.BP[chan]) +
sv_offset)
return sv
def compute_echogram_metadata(self, profile_hdr):
"""
Compute the metadata parameters needed to compute the zplsc-c valume backscatter values.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:return: sound_speed : Speed of sound based on temperature, pressure and salinity.
depth_range : Range of depth values of the zplsc-c data.
sea_absorb : Sea absorbtion based on temperature, pressure, salinity and frequency.
"""
# If the temperature sensor is available, compute the temperature from the counts.
temperature = 0
if profile_hdr.is_sensor_available:
temperature = zf.zplsc_c_temperature(profile_hdr.temperature, self.cc.ka, self.cc.kb, self.cc.kc,
self.cc.A, self.cc.B, self.cc.C)
sound_speed = zf.zplsc_c_ss(temperature, self.params.Pressure, self.params.Salinity)
_m = []
depth_range = []
for chan in range(profile_hdr.num_channels):
_m.append(np.array([x for x in range(1, (profile_hdr.num_bins[chan]/self.params.Bins2Avg)+1)]))
depth_range.append(sound_speed*profile_hdr.lockout_index[0]/(2*profile_hdr.digitization_rate[0]) +
(sound_speed/4)*(((2*_m[chan]-1)*profile_hdr.range_samples[0]*self.params.Bins2Avg-1) /
float(profile_hdr.digitization_rate[0]) +
profile_hdr.pulse_length[0]/1e6))
sea_absorb = []
for chan in range(profile_hdr.num_channels):
# Calculate absorption coeff for each frequency.
sea_absorb.append(zf.zplsc_c_absorbtion(temperature, self.params.Pressure, self.params.Salinity,
profile_hdr.frequency[chan]))
return sound_speed, depth_range, sea_absorb
| _transpose_and_flip | identifier_name |
zplsc_c_echogram.py | """
@package mi.dataset.driver.zplsc_c
@file mi/dataset/driver/zplsc_c/zplsc_c_echogram.py
@author Craig Risien/Rene Gelinas
@brief ZPLSC Echogram generation for the ooicore
Release notes:
This class supports the generation of ZPLSC-C echograms. It needs matplotlib version 1.3.1 for the code to display
the colorbar at the bottom of the figure. If matplotlib version 1.1.1 is used, the colorbar is plotted over the
figure instead of at the bottom of it.
"""
from datetime import datetime
import numpy as np
import matplotlib
from matplotlib.dates import date2num, num2date
from modest_image import imshow
import mi.dataset.driver.zplsc_c.zplsc_functions as zf
matplotlib.use("Agg")
import matplotlib.pyplot as plt
__author__ = 'Craig Risien, Rene Gelinas'
REF_TIME = date2num(datetime(1900, 1, 1, 0, 0, 0))
class ZplscCParameters(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
# Configuration Parameters
Salinity = 32 # Salinity in psu
Pressure = 150 # in dbars (~ depth of instrument in meters).
Bins2Avg = 1 # number of range bins to average - 1 is no averaging
class ZplscCCalibrationCoefficients(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
ka = 464.3636
kb = 3000.0
kc = 1.893
A = 0.001466
B = 0.0002388
C = 0.000000100335
TVR = []
VTX = []
BP = []
EL = []
DS = []
# Freq 38kHz
TVR.append(1.691999969482e2)
VTX.append(1.533999938965e2)
BP.append(8.609999902546e-3)
EL.append(1.623000030518e2)
DS.append(2.280000038445e-2)
# Freq 125kHz
TVR.append(1.668999938965e2)
VTX.append(5.8e+01)
BP.append(1.530999969691e-2)
EL.append(1.376999969482e2)
DS.append(2.280000038445e-2)
# Freq 200kHz
TVR.append(1.688999938965e2)
VTX.append(9.619999694824e1)
BP.append(1.530999969691e-2)
EL.append(1.456000061035e2)
DS.append(2.250000089407e-2)
# Freq 455kHz
TVR.append(1.696000061035e2)
VTX.append(1.301000061035e2)
BP.append(8.609999902546e-3)
EL.append(1.491999969482e2)
DS.append(2.300000004470e-2)
class ZPLSCCPlot(object):
font_size_small = 14
font_size_large = 18
num_xticks = 25
num_yticks = 7
interplot_spacing = 0.1
lower_percentile = 5
upper_percentile = 95
def __init__(self, data_times, channel_data_dict, frequency_dict, depth_range, _min_db=None, _max_db=None):
self.fig = None
self.power_data_dict = self._transpose_and_flip(channel_data_dict)
if (_min_db is None) or (_max_db is None):
self.min_db, self.max_db = self._get_power_range(channel_data_dict)
else:
self.min_db = _min_db
self.max_db = _max_db
self.frequency_dict = frequency_dict
# convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
max_depth, _ = self.power_data_dict[1].shape
self._setup_plot(depth_range, max_depth)
def generate_plots(self):
"""
Generate plots for all channels in data set
"""
freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
data_axes = None
for index, frequency in enumerate(sorted(freq_to_channel)):
channel = freq_to_channel[frequency]
td_f = self.frequency_dict[channel]
title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
self.min_db, self.max_db)
if data_axes:
self._display_x_labels(self.ax[2], self.data_times)
self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def _transpose_and_flip(power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom)
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length)
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb):
"""
Compute the backscatter volumes values for one zplsc_c profile data record.
This code was borrowed from ASL MatLab code that reads in zplsc-c raw data
and performs calculations in order to compute the backscatter volume in db.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:param chan_data: Raw frequency data from the zplsc-c instrument.
:param sound_speed: Speed of sound at based on speed of sound, pressure and salinity.
:param depth_range: Range of the depth of the measurements
:param sea_absorb: Seawater absorption coefficient for each frequency
:return: sv: Volume backscatter in db
"""
_N = []
if self.params.Bins2Avg > 1:
for chan in range(profile_hdr.num_channels):
el = self.cc.EL[chan] - 2.5/self.cc.DS[chan] + np.array(chan_data[chan])/(26214*self.cc.DS[chan])
power = 10**(el/10)
# Perform bin averaging
num_bins = len(chan_data[chan])/self.params.Bins2Avg
pwr_avg = []
for _bin in range(num_bins):
pwr_avg.append(np.mean(power[_bin*self.params.Bins2Avg:(_bin+1)*self.params.Bins2Avg]))
el_avg = 10*np.log10(pwr_avg)
_N.append(np.round(26214*self.cc.DS[chan]*(el_avg - self.cc.EL[chan] + 2.5/self.cc.DS[chan])))
else:
|
sv = []
for chan in range(profile_hdr.num_channels):
# Calculate correction to Sv due to non square transmit pulse
sv_offset = zf.compute_sv_offset(profile_hdr.frequency[chan], profile_hdr.pulse_length[chan])
sv.append(self.cc.EL[chan]-2.5/self.cc.DS[chan] + _N[chan]/(26214*self.cc.DS[chan]) - self.cc.TVR[chan] -
20*np.log10(self.cc.VTX[chan]) + 20*np.log10(depth_range[chan]) +
2*sea_absorb[chan]*depth_range[chan] -
10*np.log10(0.5*sound_speed*profile_hdr.pulse_length[chan]/1e6*self.cc.BP[chan]) +
sv_offset)
return sv
def compute_echogram_metadata(self, profile_hdr):
"""
Compute the metadata parameters needed to compute the zplsc-c valume backscatter values.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:return: sound_speed : Speed of sound based on temperature, pressure and salinity.
depth_range : Range of depth values of the zplsc-c data.
sea_absorb : Sea absorbtion based on temperature, pressure, salinity and frequency.
"""
# If the temperature sensor is available, compute the temperature from the counts.
temperature = 0
if profile_hdr.is_sensor_available:
temperature = zf.zplsc_c_temperature(profile_hdr.temperature, self.cc.ka, self.cc.kb, self.cc.kc,
self.cc.A, self.cc.B, self.cc.C)
sound_speed = zf.zplsc_c_ss(temperature, self.params.Pressure, self.params.Salinity)
_m = []
depth_range = []
for chan in range(profile_hdr.num_channels):
_m.append(np.array([x for x in range(1, (profile_hdr.num_bins[chan]/self.params.Bins2Avg)+1)]))
depth_range.append(sound_speed*profile_hdr.lockout_index[0]/(2*profile_hdr.digitization_rate[0]) +
(sound_speed/4)*(((2*_m[chan]-1)*profile_hdr.range_samples[0]*self.params.Bins2Avg-1) /
float(profile_hdr.digitization_rate[0]) +
profile_hdr.pulse_length[0]/1e6))
sea_absorb = []
for chan in range(profile_hdr.num_channels):
# Calculate absorption coeff for each frequency.
sea_absorb.append(zf.zplsc_c_absorbtion(temperature, self.params.Pressure, self.params.Salinity,
profile_hdr.frequency[chan]))
return sound_speed, depth_range, sea_absorb
| for chan in range(profile_hdr.num_channels):
_N.append(np.array(chan_data[chan])) | conditional_block |
zplsc_c_echogram.py | """
@package mi.dataset.driver.zplsc_c
@file mi/dataset/driver/zplsc_c/zplsc_c_echogram.py
@author Craig Risien/Rene Gelinas
@brief ZPLSC Echogram generation for the ooicore
Release notes:
This class supports the generation of ZPLSC-C echograms. It needs matplotlib version 1.3.1 for the code to display
the colorbar at the bottom of the figure. If matplotlib version 1.1.1 is used, the colorbar is plotted over the
figure instead of at the bottom of it.
"""
from datetime import datetime
import numpy as np
import matplotlib
from matplotlib.dates import date2num, num2date
from modest_image import imshow
import mi.dataset.driver.zplsc_c.zplsc_functions as zf
matplotlib.use("Agg")
import matplotlib.pyplot as plt
__author__ = 'Craig Risien, Rene Gelinas'
REF_TIME = date2num(datetime(1900, 1, 1, 0, 0, 0))
class ZplscCParameters(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
# Configuration Parameters
Salinity = 32 # Salinity in psu
Pressure = 150 # in dbars (~ depth of instrument in meters).
Bins2Avg = 1 # number of range bins to average - 1 is no averaging
class ZplscCCalibrationCoefficients(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
ka = 464.3636
kb = 3000.0
kc = 1.893
A = 0.001466
B = 0.0002388
C = 0.000000100335
TVR = []
VTX = []
BP = []
EL = []
DS = []
# Freq 38kHz
TVR.append(1.691999969482e2)
VTX.append(1.533999938965e2)
BP.append(8.609999902546e-3)
EL.append(1.623000030518e2)
DS.append(2.280000038445e-2)
# Freq 125kHz
TVR.append(1.668999938965e2)
VTX.append(5.8e+01)
BP.append(1.530999969691e-2)
EL.append(1.376999969482e2)
DS.append(2.280000038445e-2)
# Freq 200kHz
TVR.append(1.688999938965e2)
VTX.append(9.619999694824e1)
BP.append(1.530999969691e-2)
EL.append(1.456000061035e2)
DS.append(2.250000089407e-2)
# Freq 455kHz
TVR.append(1.696000061035e2)
VTX.append(1.301000061035e2)
BP.append(8.609999902546e-3)
EL.append(1.491999969482e2)
DS.append(2.300000004470e-2)
class ZPLSCCPlot(object):
font_size_small = 14
font_size_large = 18
num_xticks = 25
num_yticks = 7
interplot_spacing = 0.1
lower_percentile = 5
upper_percentile = 95
def __init__(self, data_times, channel_data_dict, frequency_dict, depth_range, _min_db=None, _max_db=None):
self.fig = None
self.power_data_dict = self._transpose_and_flip(channel_data_dict)
if (_min_db is None) or (_max_db is None):
self.min_db, self.max_db = self._get_power_range(channel_data_dict)
else:
self.min_db = _min_db
self.max_db = _max_db
self.frequency_dict = frequency_dict
# convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
max_depth, _ = self.power_data_dict[1].shape
self._setup_plot(depth_range, max_depth)
def generate_plots(self):
"""
Generate plots for all channels in data set
"""
freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
data_axes = None
for index, frequency in enumerate(sorted(freq_to_channel)):
channel = freq_to_channel[frequency]
td_f = self.frequency_dict[channel]
title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
self.min_db, self.max_db)
if data_axes:
self._display_x_labels(self.ax[2], self.data_times)
self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def _transpose_and_flip(power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom)
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
|
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb):
"""
Compute the backscatter volumes values for one zplsc_c profile data record.
This code was borrowed from ASL MatLab code that reads in zplsc-c raw data
and performs calculations in order to compute the backscatter volume in db.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:param chan_data: Raw frequency data from the zplsc-c instrument.
:param sound_speed: Speed of sound at based on speed of sound, pressure and salinity.
:param depth_range: Range of the depth of the measurements
:param sea_absorb: Seawater absorption coefficient for each frequency
:return: sv: Volume backscatter in db
"""
_N = []
if self.params.Bins2Avg > 1:
for chan in range(profile_hdr.num_channels):
el = self.cc.EL[chan] - 2.5/self.cc.DS[chan] + np.array(chan_data[chan])/(26214*self.cc.DS[chan])
power = 10**(el/10)
# Perform bin averaging
num_bins = len(chan_data[chan])/self.params.Bins2Avg
pwr_avg = []
for _bin in range(num_bins):
pwr_avg.append(np.mean(power[_bin*self.params.Bins2Avg:(_bin+1)*self.params.Bins2Avg]))
el_avg = 10*np.log10(pwr_avg)
_N.append(np.round(26214*self.cc.DS[chan]*(el_avg - self.cc.EL[chan] + 2.5/self.cc.DS[chan])))
else:
for chan in range(profile_hdr.num_channels):
_N.append(np.array(chan_data[chan]))
sv = []
for chan in range(profile_hdr.num_channels):
# Calculate correction to Sv due to non square transmit pulse
sv_offset = zf.compute_sv_offset(profile_hdr.frequency[chan], profile_hdr.pulse_length[chan])
sv.append(self.cc.EL[chan]-2.5/self.cc.DS[chan] + _N[chan]/(26214*self.cc.DS[chan]) - self.cc.TVR[chan] -
20*np.log10(self.cc.VTX[chan]) + 20*np.log10(depth_range[chan]) +
2*sea_absorb[chan]*depth_range[chan] -
10*np.log10(0.5*sound_speed*profile_hdr.pulse_length[chan]/1e6*self.cc.BP[chan]) +
sv_offset)
return sv
def compute_echogram_metadata(self, profile_hdr):
"""
Compute the metadata parameters needed to compute the zplsc-c valume backscatter values.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:return: sound_speed : Speed of sound based on temperature, pressure and salinity.
depth_range : Range of depth values of the zplsc-c data.
sea_absorb : Sea absorbtion based on temperature, pressure, salinity and frequency.
"""
# If the temperature sensor is available, compute the temperature from the counts.
temperature = 0
if profile_hdr.is_sensor_available:
temperature = zf.zplsc_c_temperature(profile_hdr.temperature, self.cc.ka, self.cc.kb, self.cc.kc,
self.cc.A, self.cc.B, self.cc.C)
sound_speed = zf.zplsc_c_ss(temperature, self.params.Pressure, self.params.Salinity)
_m = []
depth_range = []
for chan in range(profile_hdr.num_channels):
_m.append(np.array([x for x in range(1, (profile_hdr.num_bins[chan]/self.params.Bins2Avg)+1)]))
depth_range.append(sound_speed*profile_hdr.lockout_index[0]/(2*profile_hdr.digitization_rate[0]) +
(sound_speed/4)*(((2*_m[chan]-1)*profile_hdr.range_samples[0]*self.params.Bins2Avg-1) /
float(profile_hdr.digitization_rate[0]) +
profile_hdr.pulse_length[0]/1e6))
sea_absorb = []
for chan in range(profile_hdr.num_channels):
# Calculate absorption coeff for each frequency.
sea_absorb.append(zf.zplsc_c_absorbtion(temperature, self.params.Pressure, self.params.Salinity,
profile_hdr.frequency[chan]))
return sound_speed, depth_range, sea_absorb
| time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length) | identifier_body |
zplsc_c_echogram.py | """
@package mi.dataset.driver.zplsc_c
@file mi/dataset/driver/zplsc_c/zplsc_c_echogram.py
@author Craig Risien/Rene Gelinas
@brief ZPLSC Echogram generation for the ooicore
Release notes:
This class supports the generation of ZPLSC-C echograms. It needs matplotlib version 1.3.1 for the code to display
the colorbar at the bottom of the figure. If matplotlib version 1.1.1 is used, the colorbar is plotted over the
figure instead of at the bottom of it.
"""
from datetime import datetime
import numpy as np
import matplotlib
from matplotlib.dates import date2num, num2date
from modest_image import imshow
import mi.dataset.driver.zplsc_c.zplsc_functions as zf
matplotlib.use("Agg")
import matplotlib.pyplot as plt
__author__ = 'Craig Risien, Rene Gelinas'
REF_TIME = date2num(datetime(1900, 1, 1, 0, 0, 0))
class ZplscCParameters(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
# Configuration Parameters
Salinity = 32 # Salinity in psu
Pressure = 150 # in dbars (~ depth of instrument in meters).
Bins2Avg = 1 # number of range bins to average - 1 is no averaging
class ZplscCCalibrationCoefficients(object):
# TODO: This class should be replaced by methods to get the CCs from the system.
ka = 464.3636
kb = 3000.0
kc = 1.893
A = 0.001466
B = 0.0002388
C = 0.000000100335
TVR = []
VTX = []
BP = []
EL = []
DS = []
# Freq 38kHz
TVR.append(1.691999969482e2)
VTX.append(1.533999938965e2)
BP.append(8.609999902546e-3)
EL.append(1.623000030518e2)
DS.append(2.280000038445e-2)
# Freq 125kHz
TVR.append(1.668999938965e2)
VTX.append(5.8e+01)
BP.append(1.530999969691e-2)
EL.append(1.376999969482e2)
DS.append(2.280000038445e-2)
# Freq 200kHz
TVR.append(1.688999938965e2)
VTX.append(9.619999694824e1)
BP.append(1.530999969691e-2)
EL.append(1.456000061035e2)
DS.append(2.250000089407e-2)
# Freq 455kHz
TVR.append(1.696000061035e2)
VTX.append(1.301000061035e2)
BP.append(8.609999902546e-3)
EL.append(1.491999969482e2)
DS.append(2.300000004470e-2)
class ZPLSCCPlot(object):
font_size_small = 14
font_size_large = 18
num_xticks = 25
num_yticks = 7
interplot_spacing = 0.1
lower_percentile = 5
upper_percentile = 95
def __init__(self, data_times, channel_data_dict, frequency_dict, depth_range, _min_db=None, _max_db=None):
self.fig = None
self.power_data_dict = self._transpose_and_flip(channel_data_dict)
if (_min_db is None) or (_max_db is None):
self.min_db, self.max_db = self._get_power_range(channel_data_dict)
else:
self.min_db = _min_db
self.max_db = _max_db
self.frequency_dict = frequency_dict
# convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
max_depth, _ = self.power_data_dict[1].shape
self._setup_plot(depth_range, max_depth)
def generate_plots(self):
"""
Generate plots for all channels in data set
"""
freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
data_axes = None
for index, frequency in enumerate(sorted(freq_to_channel)):
channel = freq_to_channel[frequency]
td_f = self.frequency_dict[channel]
title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
self.min_db, self.max_db)
if data_axes:
self._display_x_labels(self.ax[2], self.data_times)
self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def _transpose_and_flip(power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom)
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length)
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb):
"""
Compute the backscatter volumes values for one zplsc_c profile data record.
This code was borrowed from ASL MatLab code that reads in zplsc-c raw data
and performs calculations in order to compute the backscatter volume in db.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:param chan_data: Raw frequency data from the zplsc-c instrument.
:param sound_speed: Speed of sound at based on speed of sound, pressure and salinity.
:param depth_range: Range of the depth of the measurements
:param sea_absorb: Seawater absorption coefficient for each frequency
:return: sv: Volume backscatter in db
"""
_N = []
if self.params.Bins2Avg > 1:
for chan in range(profile_hdr.num_channels):
el = self.cc.EL[chan] - 2.5/self.cc.DS[chan] + np.array(chan_data[chan])/(26214*self.cc.DS[chan])
power = 10**(el/10)
# Perform bin averaging
num_bins = len(chan_data[chan])/self.params.Bins2Avg
pwr_avg = []
for _bin in range(num_bins):
pwr_avg.append(np.mean(power[_bin*self.params.Bins2Avg:(_bin+1)*self.params.Bins2Avg]))
el_avg = 10*np.log10(pwr_avg)
_N.append(np.round(26214*self.cc.DS[chan]*(el_avg - self.cc.EL[chan] + 2.5/self.cc.DS[chan])))
else:
for chan in range(profile_hdr.num_channels):
_N.append(np.array(chan_data[chan]))
sv = []
for chan in range(profile_hdr.num_channels):
# Calculate correction to Sv due to non square transmit pulse
sv_offset = zf.compute_sv_offset(profile_hdr.frequency[chan], profile_hdr.pulse_length[chan])
sv.append(self.cc.EL[chan]-2.5/self.cc.DS[chan] + _N[chan]/(26214*self.cc.DS[chan]) - self.cc.TVR[chan] -
20*np.log10(self.cc.VTX[chan]) + 20*np.log10(depth_range[chan]) +
2*sea_absorb[chan]*depth_range[chan] -
10*np.log10(0.5*sound_speed*profile_hdr.pulse_length[chan]/1e6*self.cc.BP[chan]) +
sv_offset)
return sv
def compute_echogram_metadata(self, profile_hdr):
"""
Compute the metadata parameters needed to compute the zplsc-c valume backscatter values.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:return: sound_speed : Speed of sound based on temperature, pressure and salinity.
depth_range : Range of depth values of the zplsc-c data.
sea_absorb : Sea absorbtion based on temperature, pressure, salinity and frequency.
"""
# If the temperature sensor is available, compute the temperature from the counts.
temperature = 0
if profile_hdr.is_sensor_available:
temperature = zf.zplsc_c_temperature(profile_hdr.temperature, self.cc.ka, self.cc.kb, self.cc.kc,
self.cc.A, self.cc.B, self.cc.C)
sound_speed = zf.zplsc_c_ss(temperature, self.params.Pressure, self.params.Salinity)
_m = []
depth_range = []
for chan in range(profile_hdr.num_channels): |
sea_absorb = []
for chan in range(profile_hdr.num_channels):
# Calculate absorption coeff for each frequency.
sea_absorb.append(zf.zplsc_c_absorbtion(temperature, self.params.Pressure, self.params.Salinity,
profile_hdr.frequency[chan]))
return sound_speed, depth_range, sea_absorb | _m.append(np.array([x for x in range(1, (profile_hdr.num_bins[chan]/self.params.Bins2Avg)+1)]))
depth_range.append(sound_speed*profile_hdr.lockout_index[0]/(2*profile_hdr.digitization_rate[0]) +
(sound_speed/4)*(((2*_m[chan]-1)*profile_hdr.range_samples[0]*self.params.Bins2Avg-1) /
float(profile_hdr.digitization_rate[0]) +
profile_hdr.pulse_length[0]/1e6)) | random_line_split |
index.js | /* eslint-disable no-alert */
import {
YellowBox,
View, Text, TouchableOpacity, Alert, Platform, AsyncStorage
} from 'react-native';
import _ from 'lodash';
/* eslint-disable no-plusplus */
import React, { Component } from 'react';
import DialogInput from 'react-native-dialog-input';
import firebase from 'firebase';
import i18n from 'i18n-js';
import { Agenda } from 'react-native-calendars';
import * as Permissions from 'expo-permissions';
import { Notifications } from 'expo';
import styles from './styles';
YellowBox.ignoreWarnings(['Setting a timer']);
const _console = _.clone(console);
console.warn = (message) => {
if (message.indexOf('Setting a timer') <= -1) {
_console.warn(message);
}
};
export default class | extends Component {
_isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[
{ text: 'ok' }
]);
}
}
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const uppderBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < uppderBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
}
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialong box of 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
rowHasChanged(r1, r2) {
return r1.name !== r2.name;
}
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: this.tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: '100%', position: 'absolute' }}
>
<DialogInput
isDialogVisible={this.state.isDialogVisible}
title={i18n.t('setReminderTime')}
keyboardType="numeric"
message={i18n.t('reminderMessage')}
hintInput="e.g. 10"
submitInput={(inputText) => { this.sendInput(inputText); }}
closeDialog={() => { this.showDialog(false); }}
submitText={i18n.t('submit')}
cancelText={i18n.t('cancel')}
/>
<Agenda
items={this.state.items}
loadItemsForMonth={this.loadItems}
renderItem={(item) => { return this.renderItem(item, this.props); }}
renderEmptyDate={this.renderEmptyDate}
rowHasChanged={this.rowHasChanged}
onRefresh={() => {
this.refreshCalendar();
}}
theme={{
calendarBackground: 'rgb(255,255,255)',
selectedDayBackgroundColor: 'rgba(156,211,215,1)',
agendaDayTextColor: 'black',
agendaDayNumColor: 'black',
agendaKnobColor: 'rgba(156,211,215,1)'
}}
/>
<View>
<View style={{ flexDirection: 'row', position: 'absolute' }}>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
firebase.auth().signOut();
}}
>
<Text>{i18n.t('logOut')}</Text>
</TouchableOpacity>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
this.showDialog(true);
}}
>
<Text>{i18n.t('adjustTime')}</Text>
</TouchableOpacity>
</View>
</View>
</View>
);
}
}
| DashboardScreen | identifier_name |
index.js | /* eslint-disable no-alert */
import {
YellowBox,
View, Text, TouchableOpacity, Alert, Platform, AsyncStorage
} from 'react-native';
import _ from 'lodash';
/* eslint-disable no-plusplus */
import React, { Component } from 'react';
import DialogInput from 'react-native-dialog-input';
import firebase from 'firebase';
import i18n from 'i18n-js';
import { Agenda } from 'react-native-calendars';
import * as Permissions from 'expo-permissions';
import { Notifications } from 'expo';
import styles from './styles';
YellowBox.ignoreWarnings(['Setting a timer']);
const _console = _.clone(console);
console.warn = (message) => {
if (message.indexOf('Setting a timer') <= -1) {
_console.warn(message);
}
};
export default class DashboardScreen extends Component {
_isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[
{ text: 'ok' }
]);
}
}
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const uppderBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < uppderBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) |
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialong box of 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
rowHasChanged(r1, r2) {
return r1.name !== r2.name;
}
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: this.tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: '100%', position: 'absolute' }}
>
<DialogInput
isDialogVisible={this.state.isDialogVisible}
title={i18n.t('setReminderTime')}
keyboardType="numeric"
message={i18n.t('reminderMessage')}
hintInput="e.g. 10"
submitInput={(inputText) => { this.sendInput(inputText); }}
closeDialog={() => { this.showDialog(false); }}
submitText={i18n.t('submit')}
cancelText={i18n.t('cancel')}
/>
<Agenda
items={this.state.items}
loadItemsForMonth={this.loadItems}
renderItem={(item) => { return this.renderItem(item, this.props); }}
renderEmptyDate={this.renderEmptyDate}
rowHasChanged={this.rowHasChanged}
onRefresh={() => {
this.refreshCalendar();
}}
theme={{
calendarBackground: 'rgb(255,255,255)',
selectedDayBackgroundColor: 'rgba(156,211,215,1)',
agendaDayTextColor: 'black',
agendaDayNumColor: 'black',
agendaKnobColor: 'rgba(156,211,215,1)'
}}
/>
<View>
<View style={{ flexDirection: 'row', position: 'absolute' }}>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
firebase.auth().signOut();
}}
>
<Text>{i18n.t('logOut')}</Text>
</TouchableOpacity>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
this.showDialog(true);
}}
>
<Text>{i18n.t('adjustTime')}</Text>
</TouchableOpacity>
</View>
</View>
</View>
);
}
}
| {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
} | conditional_block |
index.js | /* eslint-disable no-alert */
import {
YellowBox,
View, Text, TouchableOpacity, Alert, Platform, AsyncStorage
} from 'react-native';
import _ from 'lodash';
/* eslint-disable no-plusplus */
import React, { Component } from 'react';
import DialogInput from 'react-native-dialog-input';
import firebase from 'firebase';
import i18n from 'i18n-js';
import { Agenda } from 'react-native-calendars';
import * as Permissions from 'expo-permissions';
import { Notifications } from 'expo';
import styles from './styles';
YellowBox.ignoreWarnings(['Setting a timer']);
const _console = _.clone(console);
console.warn = (message) => {
if (message.indexOf('Setting a timer') <= -1) {
_console.warn(message);
}
};
export default class DashboardScreen extends Component {
_isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[
{ text: 'ok' }
]);
}
}
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const uppderBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < uppderBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
}
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialong box of 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
rowHasChanged(r1, r2) |
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: this.tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: '100%', position: 'absolute' }}
>
<DialogInput
isDialogVisible={this.state.isDialogVisible}
title={i18n.t('setReminderTime')}
keyboardType="numeric"
message={i18n.t('reminderMessage')}
hintInput="e.g. 10"
submitInput={(inputText) => { this.sendInput(inputText); }}
closeDialog={() => { this.showDialog(false); }}
submitText={i18n.t('submit')}
cancelText={i18n.t('cancel')}
/>
<Agenda
items={this.state.items}
loadItemsForMonth={this.loadItems}
renderItem={(item) => { return this.renderItem(item, this.props); }}
renderEmptyDate={this.renderEmptyDate}
rowHasChanged={this.rowHasChanged}
onRefresh={() => {
this.refreshCalendar();
}}
theme={{
calendarBackground: 'rgb(255,255,255)',
selectedDayBackgroundColor: 'rgba(156,211,215,1)',
agendaDayTextColor: 'black',
agendaDayNumColor: 'black',
agendaKnobColor: 'rgba(156,211,215,1)'
}}
/>
<View>
<View style={{ flexDirection: 'row', position: 'absolute' }}>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
firebase.auth().signOut();
}}
>
<Text>{i18n.t('logOut')}</Text>
</TouchableOpacity>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
this.showDialog(true);
}}
>
<Text>{i18n.t('adjustTime')}</Text>
</TouchableOpacity>
</View>
</View>
</View>
);
}
}
| {
return r1.name !== r2.name;
} | identifier_body |
index.js | /* eslint-disable no-alert */
import {
YellowBox,
View, Text, TouchableOpacity, Alert, Platform, AsyncStorage
} from 'react-native';
import _ from 'lodash';
/* eslint-disable no-plusplus */
import React, { Component } from 'react';
import DialogInput from 'react-native-dialog-input';
import firebase from 'firebase';
import i18n from 'i18n-js';
import { Agenda } from 'react-native-calendars';
import * as Permissions from 'expo-permissions';
import { Notifications } from 'expo';
import styles from './styles';
YellowBox.ignoreWarnings(['Setting a timer']);
const _console = _.clone(console);
console.warn = (message) => {
if (message.indexOf('Setting a timer') <= -1) {
_console.warn(message);
}
};
export default class DashboardScreen extends Component {
_isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[ |
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const uppderBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < uppderBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
}
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialong box of 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {integer} number - Time in minutes
* Sets the minutes in which the user wants to get notfications before
*/
rowHasChanged(r1, r2) {
return r1.name !== r2.name;
}
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: this.tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: '100%', position: 'absolute' }}
>
<DialogInput
isDialogVisible={this.state.isDialogVisible}
title={i18n.t('setReminderTime')}
keyboardType="numeric"
message={i18n.t('reminderMessage')}
hintInput="e.g. 10"
submitInput={(inputText) => { this.sendInput(inputText); }}
closeDialog={() => { this.showDialog(false); }}
submitText={i18n.t('submit')}
cancelText={i18n.t('cancel')}
/>
<Agenda
items={this.state.items}
loadItemsForMonth={this.loadItems}
renderItem={(item) => { return this.renderItem(item, this.props); }}
renderEmptyDate={this.renderEmptyDate}
rowHasChanged={this.rowHasChanged}
onRefresh={() => {
this.refreshCalendar();
}}
theme={{
calendarBackground: 'rgb(255,255,255)',
selectedDayBackgroundColor: 'rgba(156,211,215,1)',
agendaDayTextColor: 'black',
agendaDayNumColor: 'black',
agendaKnobColor: 'rgba(156,211,215,1)'
}}
/>
<View>
<View style={{ flexDirection: 'row', position: 'absolute' }}>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
firebase.auth().signOut();
}}
>
<Text>{i18n.t('logOut')}</Text>
</TouchableOpacity>
<TouchableOpacity
style={styles.touchable}
onPress={() => {
this.showDialog(true);
}}
>
<Text>{i18n.t('adjustTime')}</Text>
</TouchableOpacity>
</View>
</View>
</View>
);
}
} | { text: 'ok' }
]);
}
} | random_line_split |
browse.ts | import { Component, OnInit, ViewChild } from '@angular/core';
import { IonicPage, NavController, NavParams,LoadingController, ToastController, InfiniteScroll, ActionSheetController, Content, App, IonicApp, PopoverController } from 'ionic-angular';
import { ApiProvider } from '../../../providers/api/api';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'',
group:'',
type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. Might need to wait.
// prior to doing any api requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
}
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async | () {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time',
handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
}
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
loadCourse(id){
// this.app.getRootNav.pu('CoursePage',{id:id});
let nav = this.app.getRootNav();
nav.push('CoursePage',{id:id});
}
presentPopover(myEvent) {
let popover = this.popoverCtrl.create('CurrencyPage');
popover.present({
ev: myEvent
});
popover.onDidDismiss(()=>{
this.settingService.getCurrency().then(resp=>{
this.currency = resp;
// this.reloadCourses(1);
});
//this.reloadCourses(1);
});
}
ionViewDidLoad() {
console.log('ionViewDidLoad BrowsePage');
}
}
| presentLoading | identifier_name |
browse.ts | import { Component, OnInit, ViewChild } from '@angular/core';
import { IonicPage, NavController, NavParams,LoadingController, ToastController, InfiniteScroll, ActionSheetController, Content, App, IonicApp, PopoverController } from 'ionic-angular';
import { ApiProvider } from '../../../providers/api/api';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'',
group:'',
type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. Might need to wait.
// prior to doing any api requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
}
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async presentLoading() {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() |
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
loadCourse(id){
// this.app.getRootNav.pu('CoursePage',{id:id});
let nav = this.app.getRootNav();
nav.push('CoursePage',{id:id});
}
presentPopover(myEvent) {
let popover = this.popoverCtrl.create('CurrencyPage');
popover.present({
ev: myEvent
});
popover.onDidDismiss(()=>{
this.settingService.getCurrency().then(resp=>{
this.currency = resp;
// this.reloadCourses(1);
});
//this.reloadCourses(1);
});
}
ionViewDidLoad() {
console.log('ionViewDidLoad BrowsePage');
}
}
| {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time',
handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
} | identifier_body |
browse.ts | import { Component, OnInit, ViewChild } from '@angular/core';
import { IonicPage, NavController, NavParams,LoadingController, ToastController, InfiniteScroll, ActionSheetController, Content, App, IonicApp, PopoverController } from 'ionic-angular';
import { ApiProvider } from '../../../providers/api/api';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'',
group:'',
type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. Might need to wait.
// prior to doing any api requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') |
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async presentLoading() {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time',
handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
}
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
loadCourse(id){
// this.app.getRootNav.pu('CoursePage',{id:id});
let nav = this.app.getRootNav();
nav.push('CoursePage',{id:id});
}
presentPopover(myEvent) {
let popover = this.popoverCtrl.create('CurrencyPage');
popover.present({
ev: myEvent
});
popover.onDidDismiss(()=>{
this.settingService.getCurrency().then(resp=>{
this.currency = resp;
// this.reloadCourses(1);
});
//this.reloadCourses(1);
});
}
ionViewDidLoad() {
console.log('ionViewDidLoad BrowsePage');
}
}
| {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
} | conditional_block |
browse.ts | import { Component, OnInit, ViewChild } from '@angular/core';
import { IonicPage, NavController, NavParams,LoadingController, ToastController, InfiniteScroll, ActionSheetController, Content, App, IonicApp, PopoverController } from 'ionic-angular';
import { ApiProvider } from '../../../providers/api/api';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'', | type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. Might need to wait.
// prior to doing any api requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
}
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async presentLoading() {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time',
handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
}
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
loadCourse(id){
// this.app.getRootNav.pu('CoursePage',{id:id});
let nav = this.app.getRootNav();
nav.push('CoursePage',{id:id});
}
presentPopover(myEvent) {
let popover = this.popoverCtrl.create('CurrencyPage');
popover.present({
ev: myEvent
});
popover.onDidDismiss(()=>{
this.settingService.getCurrency().then(resp=>{
this.currency = resp;
// this.reloadCourses(1);
});
//this.reloadCourses(1);
});
}
ionViewDidLoad() {
console.log('ionViewDidLoad BrowsePage');
}
} | group:'', | random_line_split |
lib.rs | use itertools::Itertools;
use std::convert::TryFrom;
use std::fmt;
use std::fs;
pub const N_ROUNDS: usize = 6;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
#[derive(Debug, Clone, PartialEq)]
pub struct Position {
coords: Vec<i32>,
}
impl Position {
/// Return linear index of position, given `edge` length of grid.
#[must_use]
pub fn index_for(&self, edge: i32) -> usize {
let mut i = 0_i32;
for c in &self.coords {
i = i * edge + c;
}
usize::try_from(i).unwrap()
}
#[must_use]
/// Construct from n-dimensional coordinates.
pub fn from(coords: &[i32]) -> Self {
Self { coords: coords.iter().copied().collect() }
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ConwayCube {
Void,
Inactive,
Active,
}
impl ConwayCube {
/// Construct cube from input character.
#[must_use]
pub fn from_char(cube: char) -> ConwayCube {
match cube {
'.' => ConwayCube::Inactive,
'#' => ConwayCube::Active,
_ => {
let e = format!("invalid cube character {}", cube);
panic!(e);
}
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InfiniteGrid {
cubes: Vec<ConwayCube>,
dim: usize,
edge: i32,
}
impl fmt::Display for InfiniteGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let half_edge = self.edge / 2;
let mut s = String::new();
for coords in (0..self.dim).map(|_i| 0..self.edge).multi_cartesian_product() {
if coords[self.dim - 2] == 0 && coords[self.dim - 1] == 0 {
let axes = vec!['z', 'w', 'v', 'u'];
for d in 2..self.dim {
if d > 2 {
s += ", ";
}
let label = format!("{}={}", axes[d - 2], coords[self.dim - d - 1] - half_edge);
s += &label;
}
s += "\n";
}
let pos = Position::from(&coords);
s += match self.cube_at(&pos) {
ConwayCube::Void => "_",
ConwayCube::Inactive => ".",
ConwayCube::Active => "#",
};
if coords[self.dim - 1] >= self.edge - 1 {
s += "\n";
}
}
write!(f, "{}", s)
}
}
pub struct InfiniteGridIter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input
.lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect();
// create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() |
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, -1, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 3, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_x() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, -1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 3]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_6_rounds() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= N_ROUNDS - 1 {
n_active = g.active_cubes();
break;
}
}
assert_eq!(112, n_active);
}
#[test]
fn test_6_rounds_4d() {
// the full N_ROUNDS takes a while so:
let n_rounds = 1; // = N_ROUNDS;
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 4).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= n_rounds - 1 {
n_active = g.active_cubes();
break;
}
}
let expect = if n_rounds == 1 { 3*4 + 5 + 3*4 } else { 848 };
assert_eq!(expect, n_active);
}
}
| {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
} | identifier_body |
lib.rs | use itertools::Itertools;
use std::convert::TryFrom;
use std::fmt;
use std::fs;
pub const N_ROUNDS: usize = 6;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
#[derive(Debug, Clone, PartialEq)]
pub struct Position {
coords: Vec<i32>,
}
impl Position {
/// Return linear index of position, given `edge` length of grid.
#[must_use]
pub fn index_for(&self, edge: i32) -> usize {
let mut i = 0_i32;
for c in &self.coords {
i = i * edge + c;
}
usize::try_from(i).unwrap()
}
#[must_use]
/// Construct from n-dimensional coordinates.
pub fn from(coords: &[i32]) -> Self {
Self { coords: coords.iter().copied().collect() }
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ConwayCube {
Void,
Inactive,
Active,
}
impl ConwayCube {
/// Construct cube from input character.
#[must_use]
pub fn from_char(cube: char) -> ConwayCube {
match cube {
'.' => ConwayCube::Inactive,
'#' => ConwayCube::Active,
_ => {
let e = format!("invalid cube character {}", cube);
panic!(e);
}
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InfiniteGrid {
cubes: Vec<ConwayCube>,
dim: usize,
edge: i32,
}
impl fmt::Display for InfiniteGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let half_edge = self.edge / 2;
let mut s = String::new();
for coords in (0..self.dim).map(|_i| 0..self.edge).multi_cartesian_product() {
if coords[self.dim - 2] == 0 && coords[self.dim - 1] == 0 {
let axes = vec!['z', 'w', 'v', 'u'];
for d in 2..self.dim {
if d > 2 {
s += ", ";
}
let label = format!("{}={}", axes[d - 2], coords[self.dim - d - 1] - half_edge);
s += &label;
}
s += "\n";
}
let pos = Position::from(&coords);
s += match self.cube_at(&pos) {
ConwayCube::Void => "_",
ConwayCube::Inactive => ".",
ConwayCube::Active => "#",
};
if coords[self.dim - 1] >= self.edge - 1 {
s += "\n";
}
}
write!(f, "{}", s)
}
}
pub struct InfiniteGridIter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input | // create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
}
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, -1, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 3, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_x() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, -1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 3]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_6_rounds() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= N_ROUNDS - 1 {
n_active = g.active_cubes();
break;
}
}
assert_eq!(112, n_active);
}
#[test]
fn test_6_rounds_4d() {
// the full N_ROUNDS takes a while so:
let n_rounds = 1; // = N_ROUNDS;
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 4).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= n_rounds - 1 {
n_active = g.active_cubes();
break;
}
}
let expect = if n_rounds == 1 { 3*4 + 5 + 3*4 } else { 848 };
assert_eq!(expect, n_active);
}
} | .lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect(); | random_line_split |
lib.rs | use itertools::Itertools;
use std::convert::TryFrom;
use std::fmt;
use std::fs;
pub const N_ROUNDS: usize = 6;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
#[derive(Debug, Clone, PartialEq)]
pub struct Position {
coords: Vec<i32>,
}
impl Position {
/// Return linear index of position, given `edge` length of grid.
#[must_use]
pub fn index_for(&self, edge: i32) -> usize {
let mut i = 0_i32;
for c in &self.coords {
i = i * edge + c;
}
usize::try_from(i).unwrap()
}
#[must_use]
/// Construct from n-dimensional coordinates.
pub fn from(coords: &[i32]) -> Self {
Self { coords: coords.iter().copied().collect() }
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ConwayCube {
Void,
Inactive,
Active,
}
impl ConwayCube {
/// Construct cube from input character.
#[must_use]
pub fn from_char(cube: char) -> ConwayCube {
match cube {
'.' => ConwayCube::Inactive,
'#' => ConwayCube::Active,
_ => {
let e = format!("invalid cube character {}", cube);
panic!(e);
}
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InfiniteGrid {
cubes: Vec<ConwayCube>,
dim: usize,
edge: i32,
}
impl fmt::Display for InfiniteGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let half_edge = self.edge / 2;
let mut s = String::new();
for coords in (0..self.dim).map(|_i| 0..self.edge).multi_cartesian_product() {
if coords[self.dim - 2] == 0 && coords[self.dim - 1] == 0 {
let axes = vec!['z', 'w', 'v', 'u'];
for d in 2..self.dim {
if d > 2 {
s += ", ";
}
let label = format!("{}={}", axes[d - 2], coords[self.dim - d - 1] - half_edge);
s += &label;
}
s += "\n";
}
let pos = Position::from(&coords);
s += match self.cube_at(&pos) {
ConwayCube::Void => "_",
ConwayCube::Inactive => ".",
ConwayCube::Active => "#",
};
if coords[self.dim - 1] >= self.edge - 1 {
s += "\n";
}
}
write!(f, "{}", s)
}
}
pub struct InfiniteGridIter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input
.lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect();
// create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
}
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, -1, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 3, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_x() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, -1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 3]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_6_rounds() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= N_ROUNDS - 1 {
n_active = g.active_cubes();
break;
}
}
assert_eq!(112, n_active);
}
#[test]
fn | () {
// the full N_ROUNDS takes a while so:
let n_rounds = 1; // = N_ROUNDS;
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 4).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= n_rounds - 1 {
n_active = g.active_cubes();
break;
}
}
let expect = if n_rounds == 1 { 3*4 + 5 + 3*4 } else { 848 };
assert_eq!(expect, n_active);
}
}
| test_6_rounds_4d | identifier_name |
lib.rs | use itertools::Itertools;
use std::convert::TryFrom;
use std::fmt;
use std::fs;
pub const N_ROUNDS: usize = 6;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
#[derive(Debug, Clone, PartialEq)]
pub struct Position {
coords: Vec<i32>,
}
impl Position {
/// Return linear index of position, given `edge` length of grid.
#[must_use]
pub fn index_for(&self, edge: i32) -> usize {
let mut i = 0_i32;
for c in &self.coords {
i = i * edge + c;
}
usize::try_from(i).unwrap()
}
#[must_use]
/// Construct from n-dimensional coordinates.
pub fn from(coords: &[i32]) -> Self {
Self { coords: coords.iter().copied().collect() }
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ConwayCube {
Void,
Inactive,
Active,
}
impl ConwayCube {
/// Construct cube from input character.
#[must_use]
pub fn from_char(cube: char) -> ConwayCube {
match cube {
'.' => ConwayCube::Inactive,
'#' => ConwayCube::Active,
_ => {
let e = format!("invalid cube character {}", cube);
panic!(e);
}
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InfiniteGrid {
cubes: Vec<ConwayCube>,
dim: usize,
edge: i32,
}
impl fmt::Display for InfiniteGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let half_edge = self.edge / 2;
let mut s = String::new();
for coords in (0..self.dim).map(|_i| 0..self.edge).multi_cartesian_product() {
if coords[self.dim - 2] == 0 && coords[self.dim - 1] == 0 {
let axes = vec!['z', 'w', 'v', 'u'];
for d in 2..self.dim {
if d > 2 {
s += ", ";
}
let label = format!("{}={}", axes[d - 2], coords[self.dim - d - 1] - half_edge);
s += &label;
}
s += "\n";
}
let pos = Position::from(&coords);
s += match self.cube_at(&pos) {
ConwayCube::Void => "_",
ConwayCube::Inactive => ".",
ConwayCube::Active => "#",
};
if coords[self.dim - 1] >= self.edge - 1 {
s += "\n";
}
}
write!(f, "{}", s)
}
}
pub struct InfiniteGridIter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input
.lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect();
// create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
}
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, -1, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 3, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_x() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, -1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 3]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_6_rounds() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= N_ROUNDS - 1 {
n_active = g.active_cubes();
break;
}
}
assert_eq!(112, n_active);
}
#[test]
fn test_6_rounds_4d() {
// the full N_ROUNDS takes a while so:
let n_rounds = 1; // = N_ROUNDS;
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 4).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= n_rounds - 1 {
n_active = g.active_cubes();
break;
}
}
let expect = if n_rounds == 1 | else { 848 };
assert_eq!(expect, n_active);
}
}
| { 3*4 + 5 + 3*4 } | conditional_block |
encoder.rs | //! Encoding functionality
//!
//!
pub use crate::encoder_config::{AV1EncoderConfig, AomUsage, BitstreamProfile, TileCodingMode};
use crate::common::AOMCodec;
use crate::ffi::*;
use std::mem::{self, MaybeUninit};
use std::ptr;
use av_data::frame::{Frame, FrameBufferConv, MediaKind};
use av_data::packet::Packet;
use av_data::pixel::formats::YUV420;
use av_data::pixel::Formaton;
#[derive(Clone, Debug, PartialEq)]
pub struct PSNR {
pub samples: [u32; 4],
pub sse: [u64; 4],
pub psnr: [f64; 4],
}
/// Safe wrapper around `aom_codec_cx_pkt`
#[derive(Clone, Debug)]
pub enum AOMPacket {
Packet(Packet),
Stats(Vec<u8>),
MBStats(Vec<u8>),
PSNR(PSNR),
Custom(Vec<u8>),
}
fn to_buffer(buf: aom_fixed_buf_t) -> Vec<u8> {
let mut v: Vec<u8> = Vec::with_capacity(buf.sz);
unsafe {
ptr::copy_nonoverlapping(buf.buf as *const u8, v.as_mut_ptr(), buf.sz);
v.set_len(buf.sz);
}
v
}
impl AOMPacket {
fn new(pkt: aom_codec_cx_pkt) -> AOMPacket {
match pkt.kind {
aom_codec_cx_pkt_kind::AOM_CODEC_CX_FRAME_PKT => {
let f = unsafe { pkt.data.frame };
let mut p = Packet::with_capacity(f.sz);
unsafe {
ptr::copy_nonoverlapping(f.buf as *const u8, p.data.as_mut_ptr(), f.sz);
p.data.set_len(f.sz);
}
p.t.pts = Some(f.pts);
p.is_key = (f.flags & AOM_FRAME_IS_KEY) != 0;
AOMPacket::Packet(p)
}
aom_codec_cx_pkt_kind::AOM_CODEC_STATS_PKT => {
let b = to_buffer(unsafe { pkt.data.twopass_stats });
AOMPacket::Stats(b)
}
aom_codec_cx_pkt_kind::AOM_CODEC_FPMB_STATS_PKT => {
let b = to_buffer(unsafe { pkt.data.firstpass_mb_stats });
AOMPacket::MBStats(b)
}
aom_codec_cx_pkt_kind::AOM_CODEC_PSNR_PKT => {
let p = unsafe { pkt.data.psnr };
AOMPacket::PSNR(PSNR {
samples: p.samples,
sse: p.sse,
psnr: p.psnr,
})
}
aom_codec_cx_pkt_kind::AOM_CODEC_CUSTOM_PKT => {
let b = to_buffer(unsafe { pkt.data.raw });
AOMPacket::Custom(b)
}
_ => panic!("No packet defined"),
}
}
}
#[cfg(target_os = "windows")]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as i32;
img.tc = fmt.get_xfer() as i32;
img.mc = fmt.get_matrix() as i32;
}
#[cfg(not(target_os = "windows"))]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as u32;
img.tc = fmt.get_xfer() as u32;
img.mc = fmt.get_matrix() as u32;
}
// TODO: Extend
fn map_formaton(img: &mut aom_image, fmt: &Formaton) {
if fmt == YUV420 {
img.fmt = aom_img_fmt::AOM_IMG_FMT_I420;
} else {
unimplemented!();
}
img.bit_depth = 8;
img.bps = 12;
img.x_chroma_shift = 1;
img.y_chroma_shift = 1;
map_fmt_to_img(img, fmt);
}
fn img_from_frame(frame: &Frame) -> aom_image {
let mut img: aom_image = unsafe { mem::zeroed() };
if let MediaKind::Video(ref v) = frame.kind {
map_formaton(&mut img, &v.format);
img.w = v.width as u32;
img.h = v.height as u32;
img.d_w = v.width as u32;
img.d_h = v.height as u32;
}
// populate the buffers
for i in 0..frame.buf.count() {
let s: &[u8] = frame.buf.as_slice(i).unwrap();
img.planes[i] = s.as_ptr() as *mut u8;
img.stride[i] = frame.buf.linesize(i).unwrap() as i32;
}
img
}
/// AV1 Encoder
pub struct AV1Encoder {
pub(crate) ctx: aom_codec_ctx_t,
pub(crate) iter: aom_codec_iter_t,
}
unsafe impl Send for AV1Encoder {} // TODO: Make sure it cannot be abused
impl AV1Encoder {
/// Create a new encoder using the provided configuration
///
/// You may use `get_encoder` instead.
pub fn new(cfg: &mut AV1EncoderConfig) -> Result<AV1Encoder, aom_codec_err_t::Type> {
let mut ctx = MaybeUninit::uninit();
let ret = unsafe {
aom_codec_enc_init_ver(
ctx.as_mut_ptr(),
aom_codec_av1_cx(),
cfg.cfg(),
0,
AOM_ENCODER_ABI_VERSION as i32,
)
};
match ret {
aom_codec_err_t::AOM_CODEC_OK => {
let ctx = unsafe { ctx.assume_init() };
let mut enc = AV1Encoder {
ctx,
iter: ptr::null(),
};
// Apparently aom 2.0 would crash if a CPUUSED is not set explicitly.
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.expect("Cannot set CPUUSED");
Ok(enc)
}
_ => Err(ret),
}
}
/// Update the encoder parameters after-creation
///
/// It calls `aom_codec_control_`
pub fn control(
&mut self,
id: aome_enc_control_id::Type,
val: i32,
) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_control(&mut self.ctx, id as i32, val) };
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
// TODO: Cache the image information
//
/// Send an uncompressed frame to the encoder
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode`.
///
/// [`get_packet`]: #method.get_packet
pub fn encode(&mut self, frame: &Frame) -> Result<(), aom_codec_err_t::Type> {
let img = img_from_frame(frame);
let ret = unsafe { aom_codec_encode(&mut self.ctx, &img, frame.t.pts.unwrap(), 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Notify the encoder that no more data will be sent
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode` with NULL arguments.
///
/// [`get_packet`]: #method.get_packet
pub fn flush(&mut self) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_encode(&mut self.ctx, ptr::null_mut(), 0, 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Retrieve the compressed data
///
/// To be called until it returns `None`.
///
/// It calls `aom_codec_get_cx_data`.
pub fn get_packet(&mut self) -> Option<AOMPacket> {
let pkt = unsafe { aom_codec_get_cx_data(&mut self.ctx, &mut self.iter) };
if pkt.is_null() {
None
} else {
Some(AOMPacket::new(unsafe { *pkt }))
}
}
}
impl Drop for AV1Encoder {
fn drop(&mut self) {
unsafe { aom_codec_destroy(&mut self.ctx) };
}
}
impl AOMCodec for AV1Encoder {
fn get_context(&mut self) -> &mut aom_codec_ctx {
&mut self.ctx
}
}
#[cfg(feature = "codec-trait")]
mod encoder_trait {
use super::*;
use av_codec::encoder::*;
use av_codec::error::*;
use av_data::frame::ArcFrame;
use av_data::params::{CodecParams, MediaKind, VideoInfo};
use av_data::value::Value;
pub struct Des {
descr: Descr,
}
pub struct Enc {
cfg: AV1EncoderConfig,
enc: Option<AV1Encoder>,
}
impl Descriptor for Des {
type OutputEncoder = Enc;
fn create(&self) -> Self::OutputEncoder {
Enc {
cfg: AV1EncoderConfig::new().unwrap(),
enc: None,
}
}
fn describe(&self) -> &Descr {
&self.descr
}
}
impl Encoder for Enc {
fn | (&mut self) -> Result<()> {
if self.enc.is_none() {
self.cfg
.get_encoder()
.map(|enc| {
self.enc = Some(enc);
})
.map_err(|_err| Error::ConfigurationIncomplete)
} else {
unimplemented!()
}
}
// TODO: have it as default impl?
fn get_extradata(&self) -> Option<Vec<u8>> {
None
}
fn send_frame(&mut self, frame: &ArcFrame) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.encode(frame).map_err(|_| unimplemented!())
}
fn receive_packet(&mut self) -> Result<Packet> {
let enc = self.enc.as_mut().unwrap();
if let Some(p) = enc.get_packet() {
match p {
AOMPacket::Packet(pkt) => Ok(pkt),
_ => unimplemented!(),
}
} else {
Err(Error::MoreDataNeeded)
}
}
fn flush(&mut self) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.flush().map_err(|_| unimplemented!())
}
fn set_option<'a>(&mut self, key: &str, val: Value<'a>) -> Result<()> {
match (key, val) {
("w", Value::U64(v)) => self.cfg.g_w = v as u32,
("h", Value::U64(v)) => self.cfg.g_h = v as u32,
("qmin", Value::U64(v)) => self.cfg.rc_min_quantizer = v as u32,
("qmax", Value::U64(v)) => self.cfg.rc_max_quantizer = v as u32,
("timebase", Value::Pair(num, den)) => {
self.cfg.g_timebase.num = num as i32;
self.cfg.g_timebase.den = den as i32;
}
_ => unimplemented!(),
}
Ok(())
}
fn get_params(&self) -> Result<CodecParams> {
use std::sync::Arc;
Ok(CodecParams {
kind: Some(MediaKind::Video(VideoInfo {
height: self.cfg.g_h as usize,
width: self.cfg.g_w as usize,
format: Some(Arc::new(*YUV420)), // TODO: support more formats
})),
codec_id: Some("av1".to_owned()),
extradata: None,
bit_rate: 0, // TODO: expose the information
convergence_window: 0,
delay: 0,
})
}
fn set_params(&mut self, params: &CodecParams) -> Result<()> {
if let Some(MediaKind::Video(ref info)) = params.kind {
self.cfg.g_w = info.width as u32;
self.cfg.g_h = info.height as u32;
}
Ok(())
}
}
/// AV1 Encoder
///
/// To be used with [av-codec](https://docs.rs/av-codec) `Encoder Context`.
pub const AV1_DESCR: &Des = &Des {
descr: Descr {
codec: "av1",
name: "aom",
desc: "libaom AV1 encoder",
mime: "video/AV1",
},
};
}
#[cfg(feature = "codec-trait")]
pub use self::encoder_trait::AV1_DESCR;
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn init() {
let mut c = AV1EncoderConfig::new().unwrap();
let mut e = c.get_encoder().unwrap();
println!("{}", e.error_to_str());
}
use av_data::rational::*;
use av_data::timeinfo::TimeInfo;
pub fn setup(w: u32, h: u32, t: &TimeInfo) -> AV1Encoder {
if (w % 2) != 0 || (h % 2) != 0 {
panic!("Invalid frame size: w: {} h: {}", w, h);
}
let mut cfg = AV1EncoderConfig::new()
.unwrap()
.width(w)
.height(h)
.timebase(t.timebase.unwrap())
.rc_min_quantizer(0)
.rc_min_quantizer(0)
.threads(4)
.pass(aom_enc_pass::AOM_RC_ONE_PASS)
.rc_end_usage(aom_rc_mode::AOM_CQ);
let mut enc = cfg.get_encoder().unwrap();
enc.control(aome_enc_control_id::AOME_SET_CQ_LEVEL, 4)
.unwrap();
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.unwrap();
enc
}
pub fn setup_frame(w: u32, h: u32, t: &TimeInfo) -> Frame {
use av_data::frame::*;
use av_data::pixel::formats;
use std::sync::Arc;
let v = VideoInfo::new(
w as usize,
h as usize,
false,
FrameType::OTHER,
Arc::new(*formats::YUV420),
);
Frame::new_default_frame(v, Some(t.clone()))
}
#[test]
fn encode() {
let w = 200;
let h = 200;
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
};
let mut e = setup(w, h, &t);
let mut f = setup_frame(w, h, &t);
let mut out = 0;
// TODO write some pattern
for i in 0..100 {
e.encode(&f).unwrap();
f.t.pts = Some(i);
// println!("{:#?}", f);
loop {
let p = e.get_packet();
if p.is_none() {
break;
} else {
out = 1;
// println!("{:#?}", p.unwrap());
}
}
}
if out != 1 {
panic!("No packet produced");
}
}
#[cfg(all(test, feature = "codec-trait"))]
#[test]
fn encode_codec_trait() {
use super::AV1_DESCR;
use av_codec::common::CodecList;
use av_codec::encoder::*;
use av_codec::error::*;
use std::sync::Arc;
let encoders = Codecs::from_list(&[AV1_DESCR]);
let mut ctx = Context::by_name(&encoders, "av1").unwrap();
let w = 200;
let h = 200;
ctx.set_option("w", u64::from(w)).unwrap();
ctx.set_option("h", u64::from(h)).unwrap();
ctx.set_option("timebase", (1, 1000)).unwrap();
ctx.set_option("qmin", 0u64).unwrap();
ctx.set_option("qmax", 0u64).unwrap();
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
};
ctx.configure().unwrap();
let mut f = Arc::new(setup_frame(w, h, &t));
let mut out = 0;
for i in 0..100 {
Arc::get_mut(&mut f).unwrap().t.pts = Some(i);
println!("Sending {}", i);
ctx.send_frame(&f).unwrap();
loop {
match ctx.receive_packet() {
Ok(p) => {
println!("{:#?}", p);
out = 1
}
Err(e) => match e {
Error::MoreDataNeeded => break,
_ => unimplemented!(),
},
}
}
}
ctx.flush().unwrap();
loop {
match ctx.receive_packet() {
Ok(p) => {
println!("{:#?}", p);
out = 1
}
Err(e) => match e {
Error::MoreDataNeeded => break,
_ => unimplemented!(),
},
}
}
if out != 1 {
panic!("No packet produced");
}
}
}
| configure | identifier_name |
encoder.rs | //! Encoding functionality
//!
//!
pub use crate::encoder_config::{AV1EncoderConfig, AomUsage, BitstreamProfile, TileCodingMode};
use crate::common::AOMCodec;
use crate::ffi::*;
use std::mem::{self, MaybeUninit};
use std::ptr;
use av_data::frame::{Frame, FrameBufferConv, MediaKind};
use av_data::packet::Packet;
use av_data::pixel::formats::YUV420;
use av_data::pixel::Formaton;
#[derive(Clone, Debug, PartialEq)]
pub struct PSNR {
pub samples: [u32; 4],
pub sse: [u64; 4],
pub psnr: [f64; 4],
}
/// Safe wrapper around `aom_codec_cx_pkt`
#[derive(Clone, Debug)]
pub enum AOMPacket {
Packet(Packet),
Stats(Vec<u8>),
MBStats(Vec<u8>),
PSNR(PSNR),
Custom(Vec<u8>),
}
fn to_buffer(buf: aom_fixed_buf_t) -> Vec<u8> {
let mut v: Vec<u8> = Vec::with_capacity(buf.sz);
unsafe {
ptr::copy_nonoverlapping(buf.buf as *const u8, v.as_mut_ptr(), buf.sz);
v.set_len(buf.sz);
}
v
}
impl AOMPacket {
fn new(pkt: aom_codec_cx_pkt) -> AOMPacket {
match pkt.kind {
aom_codec_cx_pkt_kind::AOM_CODEC_CX_FRAME_PKT => {
let f = unsafe { pkt.data.frame };
let mut p = Packet::with_capacity(f.sz);
unsafe {
ptr::copy_nonoverlapping(f.buf as *const u8, p.data.as_mut_ptr(), f.sz);
p.data.set_len(f.sz);
}
p.t.pts = Some(f.pts);
p.is_key = (f.flags & AOM_FRAME_IS_KEY) != 0;
AOMPacket::Packet(p)
}
aom_codec_cx_pkt_kind::AOM_CODEC_STATS_PKT => {
let b = to_buffer(unsafe { pkt.data.twopass_stats });
AOMPacket::Stats(b)
}
aom_codec_cx_pkt_kind::AOM_CODEC_FPMB_STATS_PKT => {
let b = to_buffer(unsafe { pkt.data.firstpass_mb_stats });
AOMPacket::MBStats(b)
}
aom_codec_cx_pkt_kind::AOM_CODEC_PSNR_PKT => {
let p = unsafe { pkt.data.psnr };
AOMPacket::PSNR(PSNR {
samples: p.samples,
sse: p.sse,
psnr: p.psnr,
})
}
aom_codec_cx_pkt_kind::AOM_CODEC_CUSTOM_PKT => {
let b = to_buffer(unsafe { pkt.data.raw });
AOMPacket::Custom(b)
}
_ => panic!("No packet defined"),
}
}
}
#[cfg(target_os = "windows")]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as i32;
img.tc = fmt.get_xfer() as i32;
img.mc = fmt.get_matrix() as i32;
}
#[cfg(not(target_os = "windows"))]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as u32;
img.tc = fmt.get_xfer() as u32;
img.mc = fmt.get_matrix() as u32;
}
// TODO: Extend
fn map_formaton(img: &mut aom_image, fmt: &Formaton) {
if fmt == YUV420 {
img.fmt = aom_img_fmt::AOM_IMG_FMT_I420;
} else {
unimplemented!();
}
img.bit_depth = 8;
img.bps = 12;
img.x_chroma_shift = 1;
img.y_chroma_shift = 1;
map_fmt_to_img(img, fmt);
}
fn img_from_frame(frame: &Frame) -> aom_image {
let mut img: aom_image = unsafe { mem::zeroed() };
if let MediaKind::Video(ref v) = frame.kind {
map_formaton(&mut img, &v.format);
img.w = v.width as u32;
img.h = v.height as u32;
img.d_w = v.width as u32;
img.d_h = v.height as u32;
}
// populate the buffers
for i in 0..frame.buf.count() {
let s: &[u8] = frame.buf.as_slice(i).unwrap();
img.planes[i] = s.as_ptr() as *mut u8;
img.stride[i] = frame.buf.linesize(i).unwrap() as i32;
}
img
}
/// AV1 Encoder
pub struct AV1Encoder {
pub(crate) ctx: aom_codec_ctx_t,
pub(crate) iter: aom_codec_iter_t,
}
unsafe impl Send for AV1Encoder {} // TODO: Make sure it cannot be abused
impl AV1Encoder {
/// Create a new encoder using the provided configuration
///
/// You may use `get_encoder` instead.
pub fn new(cfg: &mut AV1EncoderConfig) -> Result<AV1Encoder, aom_codec_err_t::Type> {
let mut ctx = MaybeUninit::uninit();
let ret = unsafe {
aom_codec_enc_init_ver(
ctx.as_mut_ptr(),
aom_codec_av1_cx(),
cfg.cfg(),
0,
AOM_ENCODER_ABI_VERSION as i32,
)
};
match ret {
aom_codec_err_t::AOM_CODEC_OK => {
let ctx = unsafe { ctx.assume_init() };
let mut enc = AV1Encoder {
ctx,
iter: ptr::null(),
};
// Apparently aom 2.0 would crash if a CPUUSED is not set explicitly.
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.expect("Cannot set CPUUSED");
Ok(enc)
}
_ => Err(ret),
}
}
/// Update the encoder parameters after-creation
///
/// It calls `aom_codec_control_`
pub fn control(
&mut self,
id: aome_enc_control_id::Type,
val: i32,
) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_control(&mut self.ctx, id as i32, val) };
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
// TODO: Cache the image information
//
/// Send an uncompressed frame to the encoder
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode`.
///
/// [`get_packet`]: #method.get_packet
pub fn encode(&mut self, frame: &Frame) -> Result<(), aom_codec_err_t::Type> {
let img = img_from_frame(frame);
let ret = unsafe { aom_codec_encode(&mut self.ctx, &img, frame.t.pts.unwrap(), 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Notify the encoder that no more data will be sent
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode` with NULL arguments.
///
/// [`get_packet`]: #method.get_packet
pub fn flush(&mut self) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_encode(&mut self.ctx, ptr::null_mut(), 0, 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Retrieve the compressed data
///
/// To be called until it returns `None`.
///
/// It calls `aom_codec_get_cx_data`.
pub fn get_packet(&mut self) -> Option<AOMPacket> {
let pkt = unsafe { aom_codec_get_cx_data(&mut self.ctx, &mut self.iter) };
if pkt.is_null() {
None
} else {
Some(AOMPacket::new(unsafe { *pkt }))
}
}
}
impl Drop for AV1Encoder {
fn drop(&mut self) {
unsafe { aom_codec_destroy(&mut self.ctx) };
}
}
impl AOMCodec for AV1Encoder {
fn get_context(&mut self) -> &mut aom_codec_ctx {
&mut self.ctx
}
}
#[cfg(feature = "codec-trait")]
mod encoder_trait {
use super::*;
use av_codec::encoder::*;
use av_codec::error::*;
use av_data::frame::ArcFrame;
use av_data::params::{CodecParams, MediaKind, VideoInfo};
use av_data::value::Value;
pub struct Des {
descr: Descr,
}
pub struct Enc {
cfg: AV1EncoderConfig,
enc: Option<AV1Encoder>,
}
impl Descriptor for Des {
type OutputEncoder = Enc;
fn create(&self) -> Self::OutputEncoder {
Enc {
cfg: AV1EncoderConfig::new().unwrap(),
enc: None,
}
}
fn describe(&self) -> &Descr {
&self.descr
}
}
impl Encoder for Enc {
fn configure(&mut self) -> Result<()> {
if self.enc.is_none() {
self.cfg
.get_encoder()
.map(|enc| {
self.enc = Some(enc);
})
.map_err(|_err| Error::ConfigurationIncomplete)
} else {
unimplemented!()
}
}
// TODO: have it as default impl?
fn get_extradata(&self) -> Option<Vec<u8>> {
None
}
fn send_frame(&mut self, frame: &ArcFrame) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.encode(frame).map_err(|_| unimplemented!())
}
fn receive_packet(&mut self) -> Result<Packet> {
let enc = self.enc.as_mut().unwrap();
if let Some(p) = enc.get_packet() {
match p {
AOMPacket::Packet(pkt) => Ok(pkt),
_ => unimplemented!(),
}
} else {
Err(Error::MoreDataNeeded)
}
}
fn flush(&mut self) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.flush().map_err(|_| unimplemented!())
}
fn set_option<'a>(&mut self, key: &str, val: Value<'a>) -> Result<()> {
match (key, val) {
("w", Value::U64(v)) => self.cfg.g_w = v as u32,
("h", Value::U64(v)) => self.cfg.g_h = v as u32,
("qmin", Value::U64(v)) => self.cfg.rc_min_quantizer = v as u32,
("qmax", Value::U64(v)) => self.cfg.rc_max_quantizer = v as u32,
("timebase", Value::Pair(num, den)) => {
self.cfg.g_timebase.num = num as i32;
self.cfg.g_timebase.den = den as i32;
}
_ => unimplemented!(),
}
Ok(())
}
fn get_params(&self) -> Result<CodecParams> {
use std::sync::Arc;
Ok(CodecParams {
kind: Some(MediaKind::Video(VideoInfo {
height: self.cfg.g_h as usize,
width: self.cfg.g_w as usize,
format: Some(Arc::new(*YUV420)), // TODO: support more formats
})),
codec_id: Some("av1".to_owned()),
extradata: None,
bit_rate: 0, // TODO: expose the information
convergence_window: 0,
delay: 0,
})
}
fn set_params(&mut self, params: &CodecParams) -> Result<()> {
if let Some(MediaKind::Video(ref info)) = params.kind {
self.cfg.g_w = info.width as u32;
self.cfg.g_h = info.height as u32;
}
Ok(())
}
}
/// AV1 Encoder
///
/// To be used with [av-codec](https://docs.rs/av-codec) `Encoder Context`.
pub const AV1_DESCR: &Des = &Des {
descr: Descr {
codec: "av1",
name: "aom",
desc: "libaom AV1 encoder",
mime: "video/AV1",
},
};
}
#[cfg(feature = "codec-trait")]
pub use self::encoder_trait::AV1_DESCR;
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn init() {
let mut c = AV1EncoderConfig::new().unwrap();
let mut e = c.get_encoder().unwrap();
println!("{}", e.error_to_str());
}
use av_data::rational::*;
use av_data::timeinfo::TimeInfo;
pub fn setup(w: u32, h: u32, t: &TimeInfo) -> AV1Encoder {
if (w % 2) != 0 || (h % 2) != 0 {
panic!("Invalid frame size: w: {} h: {}", w, h);
}
let mut cfg = AV1EncoderConfig::new()
.unwrap()
.width(w)
.height(h)
.timebase(t.timebase.unwrap())
.rc_min_quantizer(0)
.rc_min_quantizer(0)
.threads(4)
.pass(aom_enc_pass::AOM_RC_ONE_PASS)
.rc_end_usage(aom_rc_mode::AOM_CQ);
let mut enc = cfg.get_encoder().unwrap();
enc.control(aome_enc_control_id::AOME_SET_CQ_LEVEL, 4)
.unwrap();
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.unwrap();
enc
}
pub fn setup_frame(w: u32, h: u32, t: &TimeInfo) -> Frame {
use av_data::frame::*;
use av_data::pixel::formats;
use std::sync::Arc;
let v = VideoInfo::new(
w as usize,
h as usize,
false,
FrameType::OTHER,
Arc::new(*formats::YUV420),
);
Frame::new_default_frame(v, Some(t.clone()))
}
#[test]
fn encode() {
let w = 200;
let h = 200;
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
};
let mut e = setup(w, h, &t);
let mut f = setup_frame(w, h, &t);
let mut out = 0;
// TODO write some pattern
for i in 0..100 {
e.encode(&f).unwrap();
f.t.pts = Some(i);
// println!("{:#?}", f);
loop {
let p = e.get_packet();
if p.is_none() {
break;
} else {
out = 1;
// println!("{:#?}", p.unwrap());
}
}
}
if out != 1 {
panic!("No packet produced");
}
}
#[cfg(all(test, feature = "codec-trait"))]
#[test]
fn encode_codec_trait() |
}
| {
use super::AV1_DESCR;
use av_codec::common::CodecList;
use av_codec::encoder::*;
use av_codec::error::*;
use std::sync::Arc;
let encoders = Codecs::from_list(&[AV1_DESCR]);
let mut ctx = Context::by_name(&encoders, "av1").unwrap();
let w = 200;
let h = 200;
ctx.set_option("w", u64::from(w)).unwrap();
ctx.set_option("h", u64::from(h)).unwrap();
ctx.set_option("timebase", (1, 1000)).unwrap();
ctx.set_option("qmin", 0u64).unwrap();
ctx.set_option("qmax", 0u64).unwrap();
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
};
ctx.configure().unwrap();
let mut f = Arc::new(setup_frame(w, h, &t));
let mut out = 0;
for i in 0..100 {
Arc::get_mut(&mut f).unwrap().t.pts = Some(i);
println!("Sending {}", i);
ctx.send_frame(&f).unwrap();
loop {
match ctx.receive_packet() {
Ok(p) => {
println!("{:#?}", p);
out = 1
}
Err(e) => match e {
Error::MoreDataNeeded => break,
_ => unimplemented!(),
},
}
}
}
ctx.flush().unwrap();
loop {
match ctx.receive_packet() {
Ok(p) => {
println!("{:#?}", p);
out = 1
}
Err(e) => match e {
Error::MoreDataNeeded => break,
_ => unimplemented!(),
},
}
}
if out != 1 {
panic!("No packet produced");
}
} | identifier_body |
encoder.rs | //! Encoding functionality
//!
//!
pub use crate::encoder_config::{AV1EncoderConfig, AomUsage, BitstreamProfile, TileCodingMode};
use crate::common::AOMCodec;
use crate::ffi::*;
use std::mem::{self, MaybeUninit};
use std::ptr;
use av_data::frame::{Frame, FrameBufferConv, MediaKind};
use av_data::packet::Packet;
use av_data::pixel::formats::YUV420;
use av_data::pixel::Formaton;
#[derive(Clone, Debug, PartialEq)]
pub struct PSNR {
pub samples: [u32; 4],
pub sse: [u64; 4],
pub psnr: [f64; 4],
}
/// Safe wrapper around `aom_codec_cx_pkt`
#[derive(Clone, Debug)]
pub enum AOMPacket {
Packet(Packet),
Stats(Vec<u8>),
MBStats(Vec<u8>),
PSNR(PSNR),
Custom(Vec<u8>),
}
fn to_buffer(buf: aom_fixed_buf_t) -> Vec<u8> {
let mut v: Vec<u8> = Vec::with_capacity(buf.sz);
unsafe {
ptr::copy_nonoverlapping(buf.buf as *const u8, v.as_mut_ptr(), buf.sz);
v.set_len(buf.sz);
}
v
}
impl AOMPacket {
fn new(pkt: aom_codec_cx_pkt) -> AOMPacket {
match pkt.kind {
aom_codec_cx_pkt_kind::AOM_CODEC_CX_FRAME_PKT => {
let f = unsafe { pkt.data.frame };
let mut p = Packet::with_capacity(f.sz);
unsafe {
ptr::copy_nonoverlapping(f.buf as *const u8, p.data.as_mut_ptr(), f.sz);
p.data.set_len(f.sz);
}
p.t.pts = Some(f.pts);
p.is_key = (f.flags & AOM_FRAME_IS_KEY) != 0;
AOMPacket::Packet(p)
}
aom_codec_cx_pkt_kind::AOM_CODEC_STATS_PKT => {
let b = to_buffer(unsafe { pkt.data.twopass_stats });
AOMPacket::Stats(b)
}
aom_codec_cx_pkt_kind::AOM_CODEC_FPMB_STATS_PKT => {
let b = to_buffer(unsafe { pkt.data.firstpass_mb_stats });
AOMPacket::MBStats(b)
}
aom_codec_cx_pkt_kind::AOM_CODEC_PSNR_PKT => {
let p = unsafe { pkt.data.psnr };
AOMPacket::PSNR(PSNR {
samples: p.samples,
sse: p.sse,
psnr: p.psnr,
})
}
aom_codec_cx_pkt_kind::AOM_CODEC_CUSTOM_PKT => {
let b = to_buffer(unsafe { pkt.data.raw });
AOMPacket::Custom(b)
}
_ => panic!("No packet defined"),
}
}
}
#[cfg(target_os = "windows")]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as i32;
img.tc = fmt.get_xfer() as i32;
img.mc = fmt.get_matrix() as i32;
}
#[cfg(not(target_os = "windows"))]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as u32;
img.tc = fmt.get_xfer() as u32;
img.mc = fmt.get_matrix() as u32;
}
// TODO: Extend
fn map_formaton(img: &mut aom_image, fmt: &Formaton) {
if fmt == YUV420 {
img.fmt = aom_img_fmt::AOM_IMG_FMT_I420;
} else {
unimplemented!();
}
img.bit_depth = 8;
img.bps = 12;
img.x_chroma_shift = 1;
img.y_chroma_shift = 1;
map_fmt_to_img(img, fmt);
}
fn img_from_frame(frame: &Frame) -> aom_image {
let mut img: aom_image = unsafe { mem::zeroed() };
if let MediaKind::Video(ref v) = frame.kind {
map_formaton(&mut img, &v.format);
img.w = v.width as u32;
img.h = v.height as u32;
img.d_w = v.width as u32;
img.d_h = v.height as u32;
}
// populate the buffers
for i in 0..frame.buf.count() {
let s: &[u8] = frame.buf.as_slice(i).unwrap();
img.planes[i] = s.as_ptr() as *mut u8;
img.stride[i] = frame.buf.linesize(i).unwrap() as i32;
}
img
}
/// AV1 Encoder
pub struct AV1Encoder {
pub(crate) ctx: aom_codec_ctx_t,
pub(crate) iter: aom_codec_iter_t,
}
unsafe impl Send for AV1Encoder {} // TODO: Make sure it cannot be abused
impl AV1Encoder {
/// Create a new encoder using the provided configuration
///
/// You may use `get_encoder` instead.
pub fn new(cfg: &mut AV1EncoderConfig) -> Result<AV1Encoder, aom_codec_err_t::Type> {
let mut ctx = MaybeUninit::uninit();
let ret = unsafe {
aom_codec_enc_init_ver(
ctx.as_mut_ptr(),
aom_codec_av1_cx(),
cfg.cfg(),
0,
AOM_ENCODER_ABI_VERSION as i32,
)
};
match ret {
aom_codec_err_t::AOM_CODEC_OK => {
let ctx = unsafe { ctx.assume_init() };
let mut enc = AV1Encoder {
ctx,
iter: ptr::null(),
};
// Apparently aom 2.0 would crash if a CPUUSED is not set explicitly.
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.expect("Cannot set CPUUSED");
Ok(enc)
}
_ => Err(ret),
}
}
/// Update the encoder parameters after-creation
///
/// It calls `aom_codec_control_`
pub fn control(
&mut self,
id: aome_enc_control_id::Type,
val: i32,
) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_control(&mut self.ctx, id as i32, val) };
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
// TODO: Cache the image information
//
/// Send an uncompressed frame to the encoder
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode`.
///
/// [`get_packet`]: #method.get_packet
pub fn encode(&mut self, frame: &Frame) -> Result<(), aom_codec_err_t::Type> {
let img = img_from_frame(frame);
let ret = unsafe { aom_codec_encode(&mut self.ctx, &img, frame.t.pts.unwrap(), 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Notify the encoder that no more data will be sent
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode` with NULL arguments.
///
/// [`get_packet`]: #method.get_packet
pub fn flush(&mut self) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_encode(&mut self.ctx, ptr::null_mut(), 0, 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Retrieve the compressed data
///
/// To be called until it returns `None`.
///
/// It calls `aom_codec_get_cx_data`.
pub fn get_packet(&mut self) -> Option<AOMPacket> {
let pkt = unsafe { aom_codec_get_cx_data(&mut self.ctx, &mut self.iter) };
if pkt.is_null() {
None
} else {
Some(AOMPacket::new(unsafe { *pkt }))
}
}
}
impl Drop for AV1Encoder {
fn drop(&mut self) {
unsafe { aom_codec_destroy(&mut self.ctx) };
}
}
impl AOMCodec for AV1Encoder {
fn get_context(&mut self) -> &mut aom_codec_ctx {
&mut self.ctx
}
}
#[cfg(feature = "codec-trait")]
mod encoder_trait {
use super::*;
use av_codec::encoder::*;
use av_codec::error::*;
use av_data::frame::ArcFrame;
use av_data::params::{CodecParams, MediaKind, VideoInfo};
use av_data::value::Value;
pub struct Des {
descr: Descr,
}
pub struct Enc {
cfg: AV1EncoderConfig,
enc: Option<AV1Encoder>,
}
impl Descriptor for Des {
type OutputEncoder = Enc;
fn create(&self) -> Self::OutputEncoder {
Enc {
cfg: AV1EncoderConfig::new().unwrap(),
enc: None,
}
}
fn describe(&self) -> &Descr {
&self.descr
}
}
impl Encoder for Enc {
fn configure(&mut self) -> Result<()> {
if self.enc.is_none() {
self.cfg
.get_encoder()
.map(|enc| {
self.enc = Some(enc);
})
.map_err(|_err| Error::ConfigurationIncomplete)
} else {
unimplemented!()
}
}
// TODO: have it as default impl?
fn get_extradata(&self) -> Option<Vec<u8>> {
None
}
fn send_frame(&mut self, frame: &ArcFrame) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.encode(frame).map_err(|_| unimplemented!())
}
fn receive_packet(&mut self) -> Result<Packet> {
let enc = self.enc.as_mut().unwrap();
if let Some(p) = enc.get_packet() {
match p {
AOMPacket::Packet(pkt) => Ok(pkt),
_ => unimplemented!(),
}
} else {
Err(Error::MoreDataNeeded)
}
}
fn flush(&mut self) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.flush().map_err(|_| unimplemented!())
}
fn set_option<'a>(&mut self, key: &str, val: Value<'a>) -> Result<()> {
match (key, val) {
("w", Value::U64(v)) => self.cfg.g_w = v as u32,
("h", Value::U64(v)) => self.cfg.g_h = v as u32,
("qmin", Value::U64(v)) => self.cfg.rc_min_quantizer = v as u32,
("qmax", Value::U64(v)) => self.cfg.rc_max_quantizer = v as u32,
("timebase", Value::Pair(num, den)) => {
self.cfg.g_timebase.num = num as i32;
self.cfg.g_timebase.den = den as i32;
}
_ => unimplemented!(),
}
Ok(())
}
fn get_params(&self) -> Result<CodecParams> {
use std::sync::Arc;
Ok(CodecParams {
kind: Some(MediaKind::Video(VideoInfo {
height: self.cfg.g_h as usize,
width: self.cfg.g_w as usize,
format: Some(Arc::new(*YUV420)), // TODO: support more formats
})),
codec_id: Some("av1".to_owned()),
extradata: None,
bit_rate: 0, // TODO: expose the information
convergence_window: 0,
delay: 0,
})
}
fn set_params(&mut self, params: &CodecParams) -> Result<()> {
if let Some(MediaKind::Video(ref info)) = params.kind {
self.cfg.g_w = info.width as u32;
self.cfg.g_h = info.height as u32;
}
Ok(())
}
}
/// AV1 Encoder
///
/// To be used with [av-codec](https://docs.rs/av-codec) `Encoder Context`.
pub const AV1_DESCR: &Des = &Des {
descr: Descr {
codec: "av1",
name: "aom",
desc: "libaom AV1 encoder",
mime: "video/AV1",
},
};
}
#[cfg(feature = "codec-trait")]
pub use self::encoder_trait::AV1_DESCR;
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn init() {
let mut c = AV1EncoderConfig::new().unwrap();
let mut e = c.get_encoder().unwrap();
println!("{}", e.error_to_str());
}
use av_data::rational::*;
use av_data::timeinfo::TimeInfo;
pub fn setup(w: u32, h: u32, t: &TimeInfo) -> AV1Encoder {
if (w % 2) != 0 || (h % 2) != 0 {
panic!("Invalid frame size: w: {} h: {}", w, h);
}
let mut cfg = AV1EncoderConfig::new()
.unwrap()
.width(w)
.height(h)
.timebase(t.timebase.unwrap())
.rc_min_quantizer(0)
.rc_min_quantizer(0)
.threads(4)
.pass(aom_enc_pass::AOM_RC_ONE_PASS)
.rc_end_usage(aom_rc_mode::AOM_CQ);
let mut enc = cfg.get_encoder().unwrap();
enc.control(aome_enc_control_id::AOME_SET_CQ_LEVEL, 4)
.unwrap();
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.unwrap();
enc
}
pub fn setup_frame(w: u32, h: u32, t: &TimeInfo) -> Frame {
use av_data::frame::*;
use av_data::pixel::formats;
use std::sync::Arc;
let v = VideoInfo::new(
w as usize,
h as usize,
false,
FrameType::OTHER,
Arc::new(*formats::YUV420),
);
Frame::new_default_frame(v, Some(t.clone()))
}
#[test]
fn encode() {
let w = 200;
let h = 200;
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
}; |
let mut e = setup(w, h, &t);
let mut f = setup_frame(w, h, &t);
let mut out = 0;
// TODO write some pattern
for i in 0..100 {
e.encode(&f).unwrap();
f.t.pts = Some(i);
// println!("{:#?}", f);
loop {
let p = e.get_packet();
if p.is_none() {
break;
} else {
out = 1;
// println!("{:#?}", p.unwrap());
}
}
}
if out != 1 {
panic!("No packet produced");
}
}
#[cfg(all(test, feature = "codec-trait"))]
#[test]
fn encode_codec_trait() {
use super::AV1_DESCR;
use av_codec::common::CodecList;
use av_codec::encoder::*;
use av_codec::error::*;
use std::sync::Arc;
let encoders = Codecs::from_list(&[AV1_DESCR]);
let mut ctx = Context::by_name(&encoders, "av1").unwrap();
let w = 200;
let h = 200;
ctx.set_option("w", u64::from(w)).unwrap();
ctx.set_option("h", u64::from(h)).unwrap();
ctx.set_option("timebase", (1, 1000)).unwrap();
ctx.set_option("qmin", 0u64).unwrap();
ctx.set_option("qmax", 0u64).unwrap();
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
};
ctx.configure().unwrap();
let mut f = Arc::new(setup_frame(w, h, &t));
let mut out = 0;
for i in 0..100 {
Arc::get_mut(&mut f).unwrap().t.pts = Some(i);
println!("Sending {}", i);
ctx.send_frame(&f).unwrap();
loop {
match ctx.receive_packet() {
Ok(p) => {
println!("{:#?}", p);
out = 1
}
Err(e) => match e {
Error::MoreDataNeeded => break,
_ => unimplemented!(),
},
}
}
}
ctx.flush().unwrap();
loop {
match ctx.receive_packet() {
Ok(p) => {
println!("{:#?}", p);
out = 1
}
Err(e) => match e {
Error::MoreDataNeeded => break,
_ => unimplemented!(),
},
}
}
if out != 1 {
panic!("No packet produced");
}
}
} | random_line_split | |
example_scenes.py | from manimlib import *
import numpy as np
# To watch one of these scenes, run the following:
# manimgl example_scenes.py OpeningManimExample
# Use -s to skip to the end and just save the final frame
# Use -w to write the animation to a file
# Use -o to write it to a file and open it once done
# Use -n <number> to skip ahead to the n'th animation of a scene.
class OpeningManimExample(Scene):
def construct(self):
intro_words = Text("""
The original motivation for manim was to
better illustrate mathematical functions
as transformations.
""")
intro_words.to_edge(UP)
self.play(Write(intro_words))
self.wait(2)
# Linear transform
grid = NumberPlane((-10, 10), (-5, 5))
matrix = [[1, 1], [0, 1]]
linear_transform_words = VGroup(
Text("This is what the matrix"),
IntegerMatrix(matrix, include_background_rectangle=True),
Text("looks like")
)
linear_transform_words.arrange(RIGHT)
linear_transform_words.to_edge(UP)
linear_transform_words.set_backstroke(width=5)
self.play(
ShowCreation(grid),
FadeTransform(intro_words, linear_transform_words)
)
self.wait()
self.play(grid.animate.apply_matrix(matrix), run_time=3)
self.wait()
# Complex map
c_grid = ComplexPlane()
moving_c_grid = c_grid.copy()
moving_c_grid.prepare_for_nonlinear_transform()
c_grid.set_stroke(BLUE_E, 1)
c_grid.add_coordinate_labels(font_size=24)
complex_map_words = TexText("""
Or thinking of the plane as $\\mathds{C}$,\\\\
this is the map $z \\rightarrow z^2$
""")
complex_map_words.to_corner(UR)
complex_map_words.set_backstroke(width=5)
self.play(
FadeOut(grid),
Write(c_grid, run_time=3),
FadeIn(moving_c_grid),
FadeTransform(linear_transform_words, complex_map_words),
)
self.wait()
self.play(
moving_c_grid.animate.apply_complex_function(lambda z: z**2),
run_time=6,
)
self.wait(2)
class AnimatingMethods(Scene):
def construct(self):
grid = Tex(R"\pi").get_grid(10, 10, height=4)
self.add(grid)
# You can animate the application of mobject methods with the
# ".animate" syntax:
self.play(grid.animate.shift(LEFT))
# Both of those will interpolate between the mobject's initial
# state and whatever happens when you apply that method.
# For this example, calling grid.shift(LEFT) would shift the
# grid one unit to the left, but both of the previous calls to
# "self.play" animate that motion.
# The same applies for any method, including those setting colors.
self.play(grid.animate.set_color(YELLOW))
self.wait()
self.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN))
self.wait()
self.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
self.wait()
# The method Mobject.apply_complex_function lets you apply arbitrary
# complex functions, treating the points defining the mobject as
# complex numbers.
self.play(grid.animate.apply_complex_function(np.exp), run_time=5)
self.wait()
# Even more generally, you could apply Mobject.apply_function,
# which takes in functions form R^3 to R^3
self.play(
grid.animate.apply_function(
lambda p: [
p[0] + 0.5 * math.sin(p[1]),
p[1] + 0.5 * math.sin(p[0]),
p[2]
]
),
run_time=5,
)
self.wait()
class TextExample(Scene):
def construct(self):
# To run this scene properly, you should have "Consolas" font in your computer
# for full usage, you can see https://github.com/3b1b/manim/pull/680
text = Text("Here is a text", font="Consolas", font_size=90)
difference = Text(
"""
The most important difference between Text and TexText is that\n
you can change the font more easily, but can't use the LaTeX grammar
""",
font="Arial", font_size=24,
# t2c is a dict that you can choose color for different text
t2c={"Text": BLUE, "TexText": BLUE, "LaTeX": ORANGE}
)
VGroup(text, difference).arrange(DOWN, buff=1)
self.play(Write(text))
self.play(FadeIn(difference, UP))
self.wait(3)
fonts = Text(
"And you can also set the font according to different words",
font="Arial",
t2f={"font": "Consolas", "words": "Consolas"},
t2c={"font": BLUE, "words": GREEN}
)
fonts.set_width(FRAME_WIDTH - 1)
slant = Text(
"And the same as slant and weight",
font="Consolas",
t2s={"slant": ITALIC},
t2w={"weight": BOLD},
t2c={"slant": ORANGE, "weight": RED}
)
VGroup(fonts, slant).arrange(DOWN, buff=0.8)
self.play(FadeOut(text), FadeOut(difference, shift=DOWN))
self.play(Write(fonts))
self.wait()
self.play(Write(slant))
self.wait()
class TexTransformExample(Scene):
def construct(self):
# Tex to color map
t2c = {
"A": BLUE,
"B": TEAL,
"C": GREEN,
}
# Configuration to pass along to each Tex mobject
kw = dict(font_size=72, t2c=t2c)
lines = VGroup(
Tex("A^2 + B^2 = C^2", **kw),
Tex("A^2 = C^2 - B^2", **kw),
Tex("A^2 = (C + B)(C - B)", **kw),
Tex(R"A = \sqrt{(C + B)(C - B)}", **kw),
)
lines.arrange(DOWN, buff=LARGE_BUFF)
self.add(lines[0])
# The animation TransformMatchingStrings will line up parts
# of the source and target which have matching substring strings.
# Here, giving it a little path_arc makes each part rotate into
# their final positions, which feels appropriate for the idea of
# rearranging an equation
self.play(
TransformMatchingStrings(
lines[0].copy(), lines[1],
# matched_keys specifies which substring should
# line up. If it's not specified, the animation
# will align the longest matching substrings.
# In this case, the substring "^2 = C^2" would
# trip it up
matched_keys=["A^2", "B^2", "C^2"],
# When you want a substring from the source
# to go to a non-equal substring from the target,
# use the key map.
key_map={"+": "-"},
path_arc=90 * DEGREES,
),
)
self.wait()
self.play(TransformMatchingStrings(
lines[1].copy(), lines[2],
matched_keys=["A^2"]
))
self.wait()
self.play(
TransformMatchingStrings(
lines[2].copy(), lines[3],
key_map={"2": R"\sqrt"},
path_arc=-30 * DEGREES,
),
)
self.wait(2)
self.play(LaggedStartMap(FadeOut, lines, shift=2 * RIGHT))
# TransformMatchingShapes will try to line up all pieces of a
# source mobject with those of a target, regardless of the
# what Mobject type they are.
source = Text("the morse code", height=1)
target = Text("here come dots", height=1)
saved_source = source.copy()
self.play(Write(source))
self.wait()
kw = dict(run_time=3, path_arc=PI / 2)
self.play(TransformMatchingShapes(source, target, **kw))
self.wait()
self.play(TransformMatchingShapes(target, saved_source, **kw))
self.wait()
class TexIndexing(Scene):
def construct(self):
# You can index into Tex mobject (or other StringMobjects) by substrings
equation = Tex(R"e^{\pi i} = -1", font_size=144)
self.add(equation)
self.play(FlashAround(equation["e"]))
self.wait()
self.play(Indicate(equation[R"\pi"]))
self.wait()
self.play(TransformFromCopy(
equation[R"e^{\pi i}"].copy().set_opacity(0.5),
equation["-1"],
path_arc=-PI / 2,
run_time=3
))
self.play(FadeOut(equation))
# Or regular expressions
equation = Tex("A^2 + B^2 = C^2", font_size=144)
self.play(Write(equation))
for part in equation[re.compile(r"\w\^2")]:
self.play(FlashAround(part))
self.wait()
self.play(FadeOut(equation))
# Indexing by substrings like this may not work when
# the order in which Latex draws symbols does not match
# the order in which they show up in the string.
# For example, here the infinity is drawn before the sigma
# so we don't get the desired behavior.
equation = Tex(R"\sum_{n = 1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}", font_size=72)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Doesn't hit the infinity
self.wait()
self.play(FadeOut(equation))
# However you can always fix this by explicitly passing in
# a string you might want to isolate later. Also, using
# \over instead of \frac helps to avoid the issue for fractions
equation = Tex(
R"\sum_{n = 1}^\infty {1 \over n^2} = {\pi^2 \over 6}",
# Explicitly mark "\infty" as a substring you might want to access
isolate=[R"\infty"],
font_size=72
)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Got it!
self.wait()
self.play(FadeOut(equation))
class UpdatersExample(Scene):
def construct(self):
square = Square()
square.set_fill(BLUE_E, 1)
# On all frames, the constructor Brace(square, UP) will
# be called, and the mobject brace will set its data to match
# that of the newly constructed object
brace = always_redraw(Brace, square, UP)
label = TexText("Width = 0.00")
number = label.make_number_changable("0.00")
# This ensures that the method deicmal.next_to(square)
# is called on every frame
always(label.next_to, brace, UP)
# You could also write the following equivalent line
# label.add_updater(lambda m: m.next_to(brace, UP))
# If the argument itself might change, you can use f_always,
# for which the arguments following the initial Mobject method
# should be functions returning arguments to that method.
# The following line ensures thst decimal.set_value(square.get_y())
# is called every frame
f_always(number.set_value, square.get_width)
# You could also write the following equivalent line
# number.add_updater(lambda m: m.set_value(square.get_width()))
self.add(square, brace, label)
# Notice that the brace and label track with the square
self.play(
square.animate.scale(2),
rate_func=there_and_back,
run_time=2,
)
self.wait()
self.play(
square.animate.set_width(5, stretch=True),
run_time=3,
)
self.wait()
self.play(
square.animate.set_width(2),
run_time=3
)
self.wait()
# In general, you can alway call Mobject.add_updater, and pass in
# a function that you want to be called on every frame. The function
# should take in either one argument, the mobject, or two arguments,
# the mobject and the amount of time since the last frame.
now = self.time
w0 = square.get_width()
square.add_updater(
lambda m: m.set_width(w0 * math.sin(self.time - now) + w0)
)
self.wait(4 * PI)
class CoordinateSystemExample(Scene):
def construct(self):
axes = Axes(
# x-axis ranges from -1 to 10, with a default step size of 1
x_range=(-1, 10),
# y-axis ranges from -2 to 2 with a step size of 0.5
y_range=(-2, 2, 0.5),
# The axes will be stretched so as to match the specified
# height and width
height=6,
width=10,
# Axes is made of two NumberLine mobjects. You can specify
# their configuration with axis_config
axis_config=dict(
stroke_color=GREY_A,
stroke_width=2,
numbers_to_exclude=[0],
),
# Alternatively, you can specify configuration for just one
# of them, like this.
y_axis_config=dict(
numbers_with_elongated_ticks=[-2, 2],
)
)
# Keyword arguments of add_coordinate_labels can be used to
# configure the DecimalNumber mobjects which it creates and
# adds to the axes
axes.add_coordinate_labels(
font_size=20,
num_decimal_places=1,
)
self.add(axes)
# Axes descends from the CoordinateSystem class, meaning
# you can call call axes.coords_to_point, abbreviated to
# axes.c2p, to associate a set of coordinates with a point,
# like so:
dot = Dot(color=RED)
dot.move_to(axes.c2p(0, 0))
self.play(FadeIn(dot, scale=0.5))
self.play(dot.animate.move_to(axes.c2p(3, 2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(5, 0.5)))
self.wait()
# Similarly, you can call axes.point_to_coords, or axes.p2c
# print(axes.p2c(dot.get_center()))
# We can draw lines from the axes to better mark the coordinates
# of a given point.
# Here, the always_redraw command means that on each new frame
# the lines will be redrawn
h_line = always_redraw(lambda: axes.get_h_line(dot.get_left()))
v_line = always_redraw(lambda: axes.get_v_line(dot.get_bottom()))
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
)
self.play(dot.animate.move_to(axes.c2p(3, -2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(1, 1)))
self.wait()
# If we tie the dot to a particular set of coordinates, notice
# that as we move the axes around it respects the coordinate
# system defined by them.
f_always(dot.move_to, lambda: axes.c2p(1, 1))
self.play(
axes.animate.scale(0.75).to_corner(UL),
run_time=2,
)
self.wait()
self.play(FadeOut(VGroup(axes, dot, h_line, v_line)))
# Other coordinate systems you can play around with include
# ThreeDAxes, NumberPlane, and ComplexPlane.
class GraphExample(Scene):
def construct(self):
axes = Axes((-3, 10), (-1, 8), height=6)
axes.add_coordinate_labels()
self.play(Write(axes, lag_ratio=0.01, run_time=1))
# Axes.get_graph will return the graph of a function
sin_graph = axes.get_graph(
lambda x: 2 * math.sin(x),
color=BLUE,
)
# By default, it draws it so as to somewhat smoothly interpolate
# between sampled points (x, f(x)). If the graph is meant to have
# a corner, though, you can set use_smoothing to False
relu_graph = axes.get_graph(
lambda x: max(x, 0),
use_smoothing=False,
color=YELLOW,
)
# For discontinuous functions, you can specify the point of
# discontinuity so that it does not try to draw over the gap.
step_graph = axes.get_graph(
lambda x: 2.0 if x > 3 else 1.0,
discontinuities=[3],
color=GREEN,
)
# Axes.get_graph_label takes in either a string or a mobject.
# If it's a string, it treats it as a LaTeX expression. By default
# it places the label next to the graph near the right side, and
# has it match the color of the graph
sin_label = axes.get_graph_label(sin_graph, "\\sin(x)")
relu_label = axes.get_graph_label(relu_graph, Text("ReLU"))
step_label = axes.get_graph_label(step_graph, Text("Step"), x=4)
self.play(
ShowCreation(sin_graph),
FadeIn(sin_label, RIGHT),
)
self.wait(2)
self.play(
ReplacementTransform(sin_graph, relu_graph),
FadeTransform(sin_label, relu_label),
)
self.wait()
self.play(
ReplacementTransform(relu_graph, step_graph),
FadeTransform(relu_label, step_label),
)
self.wait()
parabola = axes.get_graph(lambda x: 0.25 * x**2)
parabola.set_stroke(BLUE)
self.play(
FadeOut(step_graph),
FadeOut(step_label),
ShowCreation(parabola)
)
self.wait()
# You can use axes.input_to_graph_point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class TexAndNumbersExample(Scene):
def construct(self):
axes = Axes((-3, 3), (-3, 3), unit_size=1)
axes.to_edge(DOWN)
axes.add_coordinate_labels(font_size=16)
circle = Circle(radius=2)
circle.set_stroke(YELLOW, 3)
circle.move_to(axes.get_origin())
self.add(axes, circle)
# When numbers show up in tex, they can be readily
# replaced with DecimalMobjects so that methods like
# get_value and set_value can be called on them, and
# animations like ChangeDecimalToValue can be called
# on them.
tex = Tex("x^2 + y^2 = 4.00")
tex.next_to(axes, UP, buff=0.5)
value = tex.make_number_changable("4.00")
# This will tie the right hand side of our equation to
# the square of the radius of the circle
value.add_updater(lambda v: v.set_value(circle.get_radius()**2))
self.add(tex)
text = Text("""
You can manipulate numbers
in Tex mobjects
""", font_size=30)
text.next_to(tex, RIGHT, buff=1.5)
arrow = Arrow(text, tex)
self.add(text, arrow)
self.play(
circle.animate.set_height(2.0),
run_time=4,
rate_func=there_and_back,
)
# By default, tex.make_number_changable replaces the first occurance
# of the number,but by passing replace_all=True it replaces all and
# returns a group of the results
exponents = tex.make_number_changable("2", replace_all=True)
self.play(
LaggedStartMap(
FlashAround, exponents,
lag_ratio=0.2, buff=0.1, color=RED
),
exponents.animate.set_color(RED)
)
def func(x, y):
# Switch from manim coords to axes coords
xa, ya = axes.point_to_coords(np.array([x, y, 0]))
return xa**4 + ya**4 - 4
new_curve = ImplicitFunction(func)
new_curve.match_style(circle)
circle.rotate(angle_of_vector(new_curve.get_start())) # Align
value.clear_updaters()
self.play(
*(ChangeDecimalToValue(exp, 4) for exp in exponents),
ReplacementTransform(circle.copy(), new_curve),
circle.animate.set_stroke(width=1, opacity=0.5),
)
class SurfaceExample(ThreeDScene):
def construct(self):
surface_text = Text("For 3d scenes, try using surfaces")
surface_text.fix_in_frame()
surface_text.to_edge(UP)
self.add(surface_text)
self.wait(0.1)
torus1 = Torus(r1=1, r2=1)
torus2 = Torus(r1=3, r2=1)
sphere = Sphere(radius=3, resolution=torus1.resolution)
# You can texture a surface with up to two images, which will
# be interpreted as the side towards the light, and away from
# the light. These can be either urls, or paths to a local file
# in whatever you've set as the image directory in
# the custom_config.yml file
# day_texture = "EarthTextureMap"
# night_texture = "NightEarthTextureMap"
day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
surfaces = [
TexturedSurface(surface, day_texture, night_texture)
for surface in [sphere, torus1, torus2]
]
for mob in surfaces:
mob.shift(IN)
mob.mesh = SurfaceMesh(mob)
mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
surface = surfaces[0]
self.play(
FadeIn(surface),
ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
)
for mob in surfaces:
mob.add(mob.mesh)
surface.save_state()
self.play(Rotate(surface, PI / 2), run_time=2)
for mob in surfaces[1:]:
mob.rotate(PI / 2)
self.play(
Transform(surface, surfaces[1]),
run_time=3
)
self.play(
Transform(surface, surfaces[2]),
# Move camera frame during the transition
self.frame.animate.increment_phi(-10 * DEGREES),
self.frame.animate.increment_theta(-20 * DEGREES),
run_time=3
)
# Add ambient rotation
self.frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
# Play around with where the light is
light_text = Text("You can move around the light source")
light_text.move_to(surface_text)
light_text.fix_in_frame()
self.play(FadeTransform(surface_text, light_text))
light = self.camera.light_source
self.add(light)
light.save_state()
self.play(light.animate.move_to(3 * IN), run_time=5)
self.play(light.animate.shift(10 * OUT), run_time=5)
drag_text = Text("Try moving the mouse while pressing d or f")
drag_text.move_to(light_text)
drag_text.fix_in_frame()
self.play(FadeTransform(light_text, drag_text))
self.wait()
class InteractiveDevelopment(Scene):
def construct(self):
circle = Circle()
circle.set_fill(BLUE, opacity=0.5)
circle.set_stroke(BLUE_E, width=4)
square = Square()
self.play(ShowCreation(square))
self.wait()
# This opens an iPython terminal where you can keep writing
# lines as if they were part of this construct method.
# In particular, 'square', 'circle' and 'self' will all be
# part of the local namespace in that terminal.
self.embed()
# Try copying and pasting some of the lines below into
# the interactive shell
self.play(ReplacementTransform(square, circle))
self.wait()
self.play(circle.animate.stretch(4, 0))
self.play(Rotate(circle, 90 * DEGREES))
self.play(circle.animate.shift(2 * RIGHT).scale(0.25))
text = Text("""
In general, using the interactive shell
is very helpful when developing new scenes
""")
self.play(Write(text))
# In the interactive shell, you can just type
# play, add, remove, clear, wait, save_state and restore,
# instead of self.play, self.add, self.remove, etc.
# To interact with the window, type touch(). You can then
# scroll in the window, or zoom by holding down 'z' while scrolling,
# and change camera perspective by holding down 'd' while moving
# the mouse. Press 'r' to reset to the standard camera position.
# Press 'q' to stop interacting with the window and go back to
# typing new commands into the shell.
# In principle you can customize a scene to be responsive to
# mouse and keyboard interactions
always(circle.move_to, self.mouse_point)
class ControlsExample(Scene):
drag_to_pan = False
| self.textbox = Textbox()
self.checkbox = Checkbox()
self.color_picker = ColorSliders()
self.panel = ControlPanel(
Text("Text", font_size=24), self.textbox, Line(),
Text("Show/Hide Text", font_size=24), self.checkbox, Line(),
Text("Color of Text", font_size=24), self.color_picker
)
self.add(self.panel)
def construct(self):
text = Text("text", font_size=96)
def text_updater(old_text):
assert(isinstance(old_text, Text))
new_text = Text(self.textbox.get_value(), font_size=old_text.font_size)
# new_text.align_data_and_family(old_text)
new_text.move_to(old_text)
if self.checkbox.get_value():
new_text.set_fill(
color=self.color_picker.get_picked_color(),
opacity=self.color_picker.get_picked_opacity()
)
else:
new_text.set_opacity(0)
old_text.become(new_text)
text.add_updater(text_updater)
self.add(MotionMobject(text))
self.textbox.set_value("Manim")
# self.wait(60)
# self.embed()
# See https://github.com/3b1b/videos for many, many more | def setup(self): | random_line_split |
example_scenes.py | from manimlib import *
import numpy as np
# To watch one of these scenes, run the following:
# manimgl example_scenes.py OpeningManimExample
# Use -s to skip to the end and just save the final frame
# Use -w to write the animation to a file
# Use -o to write it to a file and open it once done
# Use -n <number> to skip ahead to the n'th animation of a scene.
class OpeningManimExample(Scene):
def construct(self):
intro_words = Text("""
The original motivation for manim was to
better illustrate mathematical functions
as transformations.
""")
intro_words.to_edge(UP)
self.play(Write(intro_words))
self.wait(2)
# Linear transform
grid = NumberPlane((-10, 10), (-5, 5))
matrix = [[1, 1], [0, 1]]
linear_transform_words = VGroup(
Text("This is what the matrix"),
IntegerMatrix(matrix, include_background_rectangle=True),
Text("looks like")
)
linear_transform_words.arrange(RIGHT)
linear_transform_words.to_edge(UP)
linear_transform_words.set_backstroke(width=5)
self.play(
ShowCreation(grid),
FadeTransform(intro_words, linear_transform_words)
)
self.wait()
self.play(grid.animate.apply_matrix(matrix), run_time=3)
self.wait()
# Complex map
c_grid = ComplexPlane()
moving_c_grid = c_grid.copy()
moving_c_grid.prepare_for_nonlinear_transform()
c_grid.set_stroke(BLUE_E, 1)
c_grid.add_coordinate_labels(font_size=24)
complex_map_words = TexText("""
Or thinking of the plane as $\\mathds{C}$,\\\\
this is the map $z \\rightarrow z^2$
""")
complex_map_words.to_corner(UR)
complex_map_words.set_backstroke(width=5)
self.play(
FadeOut(grid),
Write(c_grid, run_time=3),
FadeIn(moving_c_grid),
FadeTransform(linear_transform_words, complex_map_words),
)
self.wait()
self.play(
moving_c_grid.animate.apply_complex_function(lambda z: z**2),
run_time=6,
)
self.wait(2)
class AnimatingMethods(Scene):
def construct(self):
grid = Tex(R"\pi").get_grid(10, 10, height=4)
self.add(grid)
# You can animate the application of mobject methods with the
# ".animate" syntax:
self.play(grid.animate.shift(LEFT))
# Both of those will interpolate between the mobject's initial
# state and whatever happens when you apply that method.
# For this example, calling grid.shift(LEFT) would shift the
# grid one unit to the left, but both of the previous calls to
# "self.play" animate that motion.
# The same applies for any method, including those setting colors.
self.play(grid.animate.set_color(YELLOW))
self.wait()
self.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN))
self.wait()
self.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
self.wait()
# The method Mobject.apply_complex_function lets you apply arbitrary
# complex functions, treating the points defining the mobject as
# complex numbers.
self.play(grid.animate.apply_complex_function(np.exp), run_time=5)
self.wait()
# Even more generally, you could apply Mobject.apply_function,
# which takes in functions form R^3 to R^3
self.play(
grid.animate.apply_function(
lambda p: [
p[0] + 0.5 * math.sin(p[1]),
p[1] + 0.5 * math.sin(p[0]),
p[2]
]
),
run_time=5,
)
self.wait()
class TextExample(Scene):
def construct(self):
# To run this scene properly, you should have "Consolas" font in your computer
# for full usage, you can see https://github.com/3b1b/manim/pull/680
|
class TexTransformExample(Scene):
def construct(self):
# Tex to color map
t2c = {
"A": BLUE,
"B": TEAL,
"C": GREEN,
}
# Configuration to pass along to each Tex mobject
kw = dict(font_size=72, t2c=t2c)
lines = VGroup(
Tex("A^2 + B^2 = C^2", **kw),
Tex("A^2 = C^2 - B^2", **kw),
Tex("A^2 = (C + B)(C - B)", **kw),
Tex(R"A = \sqrt{(C + B)(C - B)}", **kw),
)
lines.arrange(DOWN, buff=LARGE_BUFF)
self.add(lines[0])
# The animation TransformMatchingStrings will line up parts
# of the source and target which have matching substring strings.
# Here, giving it a little path_arc makes each part rotate into
# their final positions, which feels appropriate for the idea of
# rearranging an equation
self.play(
TransformMatchingStrings(
lines[0].copy(), lines[1],
# matched_keys specifies which substring should
# line up. If it's not specified, the animation
# will align the longest matching substrings.
# In this case, the substring "^2 = C^2" would
# trip it up
matched_keys=["A^2", "B^2", "C^2"],
# When you want a substring from the source
# to go to a non-equal substring from the target,
# use the key map.
key_map={"+": "-"},
path_arc=90 * DEGREES,
),
)
self.wait()
self.play(TransformMatchingStrings(
lines[1].copy(), lines[2],
matched_keys=["A^2"]
))
self.wait()
self.play(
TransformMatchingStrings(
lines[2].copy(), lines[3],
key_map={"2": R"\sqrt"},
path_arc=-30 * DEGREES,
),
)
self.wait(2)
self.play(LaggedStartMap(FadeOut, lines, shift=2 * RIGHT))
# TransformMatchingShapes will try to line up all pieces of a
# source mobject with those of a target, regardless of the
# what Mobject type they are.
source = Text("the morse code", height=1)
target = Text("here come dots", height=1)
saved_source = source.copy()
self.play(Write(source))
self.wait()
kw = dict(run_time=3, path_arc=PI / 2)
self.play(TransformMatchingShapes(source, target, **kw))
self.wait()
self.play(TransformMatchingShapes(target, saved_source, **kw))
self.wait()
class TexIndexing(Scene):
def construct(self):
# You can index into Tex mobject (or other StringMobjects) by substrings
equation = Tex(R"e^{\pi i} = -1", font_size=144)
self.add(equation)
self.play(FlashAround(equation["e"]))
self.wait()
self.play(Indicate(equation[R"\pi"]))
self.wait()
self.play(TransformFromCopy(
equation[R"e^{\pi i}"].copy().set_opacity(0.5),
equation["-1"],
path_arc=-PI / 2,
run_time=3
))
self.play(FadeOut(equation))
# Or regular expressions
equation = Tex("A^2 + B^2 = C^2", font_size=144)
self.play(Write(equation))
for part in equation[re.compile(r"\w\^2")]:
self.play(FlashAround(part))
self.wait()
self.play(FadeOut(equation))
# Indexing by substrings like this may not work when
# the order in which Latex draws symbols does not match
# the order in which they show up in the string.
# For example, here the infinity is drawn before the sigma
# so we don't get the desired behavior.
equation = Tex(R"\sum_{n = 1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}", font_size=72)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Doesn't hit the infinity
self.wait()
self.play(FadeOut(equation))
# However you can always fix this by explicitly passing in
# a string you might want to isolate later. Also, using
# \over instead of \frac helps to avoid the issue for fractions
equation = Tex(
R"\sum_{n = 1}^\infty {1 \over n^2} = {\pi^2 \over 6}",
# Explicitly mark "\infty" as a substring you might want to access
isolate=[R"\infty"],
font_size=72
)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Got it!
self.wait()
self.play(FadeOut(equation))
class UpdatersExample(Scene):
def construct(self):
square = Square()
square.set_fill(BLUE_E, 1)
# On all frames, the constructor Brace(square, UP) will
# be called, and the mobject brace will set its data to match
# that of the newly constructed object
brace = always_redraw(Brace, square, UP)
label = TexText("Width = 0.00")
number = label.make_number_changable("0.00")
# This ensures that the method deicmal.next_to(square)
# is called on every frame
always(label.next_to, brace, UP)
# You could also write the following equivalent line
# label.add_updater(lambda m: m.next_to(brace, UP))
# If the argument itself might change, you can use f_always,
# for which the arguments following the initial Mobject method
# should be functions returning arguments to that method.
# The following line ensures thst decimal.set_value(square.get_y())
# is called every frame
f_always(number.set_value, square.get_width)
# You could also write the following equivalent line
# number.add_updater(lambda m: m.set_value(square.get_width()))
self.add(square, brace, label)
# Notice that the brace and label track with the square
self.play(
square.animate.scale(2),
rate_func=there_and_back,
run_time=2,
)
self.wait()
self.play(
square.animate.set_width(5, stretch=True),
run_time=3,
)
self.wait()
self.play(
square.animate.set_width(2),
run_time=3
)
self.wait()
# In general, you can alway call Mobject.add_updater, and pass in
# a function that you want to be called on every frame. The function
# should take in either one argument, the mobject, or two arguments,
# the mobject and the amount of time since the last frame.
now = self.time
w0 = square.get_width()
square.add_updater(
lambda m: m.set_width(w0 * math.sin(self.time - now) + w0)
)
self.wait(4 * PI)
class CoordinateSystemExample(Scene):
def construct(self):
axes = Axes(
# x-axis ranges from -1 to 10, with a default step size of 1
x_range=(-1, 10),
# y-axis ranges from -2 to 2 with a step size of 0.5
y_range=(-2, 2, 0.5),
# The axes will be stretched so as to match the specified
# height and width
height=6,
width=10,
# Axes is made of two NumberLine mobjects. You can specify
# their configuration with axis_config
axis_config=dict(
stroke_color=GREY_A,
stroke_width=2,
numbers_to_exclude=[0],
),
# Alternatively, you can specify configuration for just one
# of them, like this.
y_axis_config=dict(
numbers_with_elongated_ticks=[-2, 2],
)
)
# Keyword arguments of add_coordinate_labels can be used to
# configure the DecimalNumber mobjects which it creates and
# adds to the axes
axes.add_coordinate_labels(
font_size=20,
num_decimal_places=1,
)
self.add(axes)
# Axes descends from the CoordinateSystem class, meaning
# you can call call axes.coords_to_point, abbreviated to
# axes.c2p, to associate a set of coordinates with a point,
# like so:
dot = Dot(color=RED)
dot.move_to(axes.c2p(0, 0))
self.play(FadeIn(dot, scale=0.5))
self.play(dot.animate.move_to(axes.c2p(3, 2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(5, 0.5)))
self.wait()
# Similarly, you can call axes.point_to_coords, or axes.p2c
# print(axes.p2c(dot.get_center()))
# We can draw lines from the axes to better mark the coordinates
# of a given point.
# Here, the always_redraw command means that on each new frame
# the lines will be redrawn
h_line = always_redraw(lambda: axes.get_h_line(dot.get_left()))
v_line = always_redraw(lambda: axes.get_v_line(dot.get_bottom()))
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
)
self.play(dot.animate.move_to(axes.c2p(3, -2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(1, 1)))
self.wait()
# If we tie the dot to a particular set of coordinates, notice
# that as we move the axes around it respects the coordinate
# system defined by them.
f_always(dot.move_to, lambda: axes.c2p(1, 1))
self.play(
axes.animate.scale(0.75).to_corner(UL),
run_time=2,
)
self.wait()
self.play(FadeOut(VGroup(axes, dot, h_line, v_line)))
# Other coordinate systems you can play around with include
# ThreeDAxes, NumberPlane, and ComplexPlane.
class GraphExample(Scene):
def construct(self):
axes = Axes((-3, 10), (-1, 8), height=6)
axes.add_coordinate_labels()
self.play(Write(axes, lag_ratio=0.01, run_time=1))
# Axes.get_graph will return the graph of a function
sin_graph = axes.get_graph(
lambda x: 2 * math.sin(x),
color=BLUE,
)
# By default, it draws it so as to somewhat smoothly interpolate
# between sampled points (x, f(x)). If the graph is meant to have
# a corner, though, you can set use_smoothing to False
relu_graph = axes.get_graph(
lambda x: max(x, 0),
use_smoothing=False,
color=YELLOW,
)
# For discontinuous functions, you can specify the point of
# discontinuity so that it does not try to draw over the gap.
step_graph = axes.get_graph(
lambda x: 2.0 if x > 3 else 1.0,
discontinuities=[3],
color=GREEN,
)
# Axes.get_graph_label takes in either a string or a mobject.
# If it's a string, it treats it as a LaTeX expression. By default
# it places the label next to the graph near the right side, and
# has it match the color of the graph
sin_label = axes.get_graph_label(sin_graph, "\\sin(x)")
relu_label = axes.get_graph_label(relu_graph, Text("ReLU"))
step_label = axes.get_graph_label(step_graph, Text("Step"), x=4)
self.play(
ShowCreation(sin_graph),
FadeIn(sin_label, RIGHT),
)
self.wait(2)
self.play(
ReplacementTransform(sin_graph, relu_graph),
FadeTransform(sin_label, relu_label),
)
self.wait()
self.play(
ReplacementTransform(relu_graph, step_graph),
FadeTransform(relu_label, step_label),
)
self.wait()
parabola = axes.get_graph(lambda x: 0.25 * x**2)
parabola.set_stroke(BLUE)
self.play(
FadeOut(step_graph),
FadeOut(step_label),
ShowCreation(parabola)
)
self.wait()
# You can use axes.input_to_graph_point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class TexAndNumbersExample(Scene):
def construct(self):
axes = Axes((-3, 3), (-3, 3), unit_size=1)
axes.to_edge(DOWN)
axes.add_coordinate_labels(font_size=16)
circle = Circle(radius=2)
circle.set_stroke(YELLOW, 3)
circle.move_to(axes.get_origin())
self.add(axes, circle)
# When numbers show up in tex, they can be readily
# replaced with DecimalMobjects so that methods like
# get_value and set_value can be called on them, and
# animations like ChangeDecimalToValue can be called
# on them.
tex = Tex("x^2 + y^2 = 4.00")
tex.next_to(axes, UP, buff=0.5)
value = tex.make_number_changable("4.00")
# This will tie the right hand side of our equation to
# the square of the radius of the circle
value.add_updater(lambda v: v.set_value(circle.get_radius()**2))
self.add(tex)
text = Text("""
You can manipulate numbers
in Tex mobjects
""", font_size=30)
text.next_to(tex, RIGHT, buff=1.5)
arrow = Arrow(text, tex)
self.add(text, arrow)
self.play(
circle.animate.set_height(2.0),
run_time=4,
rate_func=there_and_back,
)
# By default, tex.make_number_changable replaces the first occurance
# of the number,but by passing replace_all=True it replaces all and
# returns a group of the results
exponents = tex.make_number_changable("2", replace_all=True)
self.play(
LaggedStartMap(
FlashAround, exponents,
lag_ratio=0.2, buff=0.1, color=RED
),
exponents.animate.set_color(RED)
)
def func(x, y):
# Switch from manim coords to axes coords
xa, ya = axes.point_to_coords(np.array([x, y, 0]))
return xa**4 + ya**4 - 4
new_curve = ImplicitFunction(func)
new_curve.match_style(circle)
circle.rotate(angle_of_vector(new_curve.get_start())) # Align
value.clear_updaters()
self.play(
*(ChangeDecimalToValue(exp, 4) for exp in exponents),
ReplacementTransform(circle.copy(), new_curve),
circle.animate.set_stroke(width=1, opacity=0.5),
)
class SurfaceExample(ThreeDScene):
def construct(self):
surface_text = Text("For 3d scenes, try using surfaces")
surface_text.fix_in_frame()
surface_text.to_edge(UP)
self.add(surface_text)
self.wait(0.1)
torus1 = Torus(r1=1, r2=1)
torus2 = Torus(r1=3, r2=1)
sphere = Sphere(radius=3, resolution=torus1.resolution)
# You can texture a surface with up to two images, which will
# be interpreted as the side towards the light, and away from
# the light. These can be either urls, or paths to a local file
# in whatever you've set as the image directory in
# the custom_config.yml file
# day_texture = "EarthTextureMap"
# night_texture = "NightEarthTextureMap"
day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
surfaces = [
TexturedSurface(surface, day_texture, night_texture)
for surface in [sphere, torus1, torus2]
]
for mob in surfaces:
mob.shift(IN)
mob.mesh = SurfaceMesh(mob)
mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
surface = surfaces[0]
self.play(
FadeIn(surface),
ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
)
for mob in surfaces:
mob.add(mob.mesh)
surface.save_state()
self.play(Rotate(surface, PI / 2), run_time=2)
for mob in surfaces[1:]:
mob.rotate(PI / 2)
self.play(
Transform(surface, surfaces[1]),
run_time=3
)
self.play(
Transform(surface, surfaces[2]),
# Move camera frame during the transition
self.frame.animate.increment_phi(-10 * DEGREES),
self.frame.animate.increment_theta(-20 * DEGREES),
run_time=3
)
# Add ambient rotation
self.frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
# Play around with where the light is
light_text = Text("You can move around the light source")
light_text.move_to(surface_text)
light_text.fix_in_frame()
self.play(FadeTransform(surface_text, light_text))
light = self.camera.light_source
self.add(light)
light.save_state()
self.play(light.animate.move_to(3 * IN), run_time=5)
self.play(light.animate.shift(10 * OUT), run_time=5)
drag_text = Text("Try moving the mouse while pressing d or f")
drag_text.move_to(light_text)
drag_text.fix_in_frame()
self.play(FadeTransform(light_text, drag_text))
self.wait()
class InteractiveDevelopment(Scene):
def construct(self):
circle = Circle()
circle.set_fill(BLUE, opacity=0.5)
circle.set_stroke(BLUE_E, width=4)
square = Square()
self.play(ShowCreation(square))
self.wait()
# This opens an iPython terminal where you can keep writing
# lines as if they were part of this construct method.
# In particular, 'square', 'circle' and 'self' will all be
# part of the local namespace in that terminal.
self.embed()
# Try copying and pasting some of the lines below into
# the interactive shell
self.play(ReplacementTransform(square, circle))
self.wait()
self.play(circle.animate.stretch(4, 0))
self.play(Rotate(circle, 90 * DEGREES))
self.play(circle.animate.shift(2 * RIGHT).scale(0.25))
text = Text("""
In general, using the interactive shell
is very helpful when developing new scenes
""")
self.play(Write(text))
# In the interactive shell, you can just type
# play, add, remove, clear, wait, save_state and restore,
# instead of self.play, self.add, self.remove, etc.
# To interact with the window, type touch(). You can then
# scroll in the window, or zoom by holding down 'z' while scrolling,
# and change camera perspective by holding down 'd' while moving
# the mouse. Press 'r' to reset to the standard camera position.
# Press 'q' to stop interacting with the window and go back to
# typing new commands into the shell.
# In principle you can customize a scene to be responsive to
# mouse and keyboard interactions
always(circle.move_to, self.mouse_point)
class ControlsExample(Scene):
drag_to_pan = False
def setup(self):
self.textbox = Textbox()
self.checkbox = Checkbox()
self.color_picker = ColorSliders()
self.panel = ControlPanel(
Text("Text", font_size=24), self.textbox, Line(),
Text("Show/Hide Text", font_size=24), self.checkbox, Line(),
Text("Color of Text", font_size=24), self.color_picker
)
self.add(self.panel)
def construct(self):
text = Text("text", font_size=96)
def text_updater(old_text):
assert(isinstance(old_text, Text))
new_text = Text(self.textbox.get_value(), font_size=old_text.font_size)
# new_text.align_data_and_family(old_text)
new_text.move_to(old_text)
if self.checkbox.get_value():
new_text.set_fill(
color=self.color_picker.get_picked_color(),
opacity=self.color_picker.get_picked_opacity()
)
else:
new_text.set_opacity(0)
old_text.become(new_text)
text.add_updater(text_updater)
self.add(MotionMobject(text))
self.textbox.set_value("Manim")
# self.wait(60)
# self.embed()
# See https://github.com/3b1b/videos for many, many more
| text = Text("Here is a text", font="Consolas", font_size=90)
difference = Text(
"""
The most important difference between Text and TexText is that\n
you can change the font more easily, but can't use the LaTeX grammar
""",
font="Arial", font_size=24,
# t2c is a dict that you can choose color for different text
t2c={"Text": BLUE, "TexText": BLUE, "LaTeX": ORANGE}
)
VGroup(text, difference).arrange(DOWN, buff=1)
self.play(Write(text))
self.play(FadeIn(difference, UP))
self.wait(3)
fonts = Text(
"And you can also set the font according to different words",
font="Arial",
t2f={"font": "Consolas", "words": "Consolas"},
t2c={"font": BLUE, "words": GREEN}
)
fonts.set_width(FRAME_WIDTH - 1)
slant = Text(
"And the same as slant and weight",
font="Consolas",
t2s={"slant": ITALIC},
t2w={"weight": BOLD},
t2c={"slant": ORANGE, "weight": RED}
)
VGroup(fonts, slant).arrange(DOWN, buff=0.8)
self.play(FadeOut(text), FadeOut(difference, shift=DOWN))
self.play(Write(fonts))
self.wait()
self.play(Write(slant))
self.wait() | identifier_body |
example_scenes.py | from manimlib import *
import numpy as np
# To watch one of these scenes, run the following:
# manimgl example_scenes.py OpeningManimExample
# Use -s to skip to the end and just save the final frame
# Use -w to write the animation to a file
# Use -o to write it to a file and open it once done
# Use -n <number> to skip ahead to the n'th animation of a scene.
class OpeningManimExample(Scene):
def construct(self):
intro_words = Text("""
The original motivation for manim was to
better illustrate mathematical functions
as transformations.
""")
intro_words.to_edge(UP)
self.play(Write(intro_words))
self.wait(2)
# Linear transform
grid = NumberPlane((-10, 10), (-5, 5))
matrix = [[1, 1], [0, 1]]
linear_transform_words = VGroup(
Text("This is what the matrix"),
IntegerMatrix(matrix, include_background_rectangle=True),
Text("looks like")
)
linear_transform_words.arrange(RIGHT)
linear_transform_words.to_edge(UP)
linear_transform_words.set_backstroke(width=5)
self.play(
ShowCreation(grid),
FadeTransform(intro_words, linear_transform_words)
)
self.wait()
self.play(grid.animate.apply_matrix(matrix), run_time=3)
self.wait()
# Complex map
c_grid = ComplexPlane()
moving_c_grid = c_grid.copy()
moving_c_grid.prepare_for_nonlinear_transform()
c_grid.set_stroke(BLUE_E, 1)
c_grid.add_coordinate_labels(font_size=24)
complex_map_words = TexText("""
Or thinking of the plane as $\\mathds{C}$,\\\\
this is the map $z \\rightarrow z^2$
""")
complex_map_words.to_corner(UR)
complex_map_words.set_backstroke(width=5)
self.play(
FadeOut(grid),
Write(c_grid, run_time=3),
FadeIn(moving_c_grid),
FadeTransform(linear_transform_words, complex_map_words),
)
self.wait()
self.play(
moving_c_grid.animate.apply_complex_function(lambda z: z**2),
run_time=6,
)
self.wait(2)
class AnimatingMethods(Scene):
def construct(self):
grid = Tex(R"\pi").get_grid(10, 10, height=4)
self.add(grid)
# You can animate the application of mobject methods with the
# ".animate" syntax:
self.play(grid.animate.shift(LEFT))
# Both of those will interpolate between the mobject's initial
# state and whatever happens when you apply that method.
# For this example, calling grid.shift(LEFT) would shift the
# grid one unit to the left, but both of the previous calls to
# "self.play" animate that motion.
# The same applies for any method, including those setting colors.
self.play(grid.animate.set_color(YELLOW))
self.wait()
self.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN))
self.wait()
self.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
self.wait()
# The method Mobject.apply_complex_function lets you apply arbitrary
# complex functions, treating the points defining the mobject as
# complex numbers.
self.play(grid.animate.apply_complex_function(np.exp), run_time=5)
self.wait()
# Even more generally, you could apply Mobject.apply_function,
# which takes in functions form R^3 to R^3
self.play(
grid.animate.apply_function(
lambda p: [
p[0] + 0.5 * math.sin(p[1]),
p[1] + 0.5 * math.sin(p[0]),
p[2]
]
),
run_time=5,
)
self.wait()
class TextExample(Scene):
def construct(self):
# To run this scene properly, you should have "Consolas" font in your computer
# for full usage, you can see https://github.com/3b1b/manim/pull/680
text = Text("Here is a text", font="Consolas", font_size=90)
difference = Text(
"""
The most important difference between Text and TexText is that\n
you can change the font more easily, but can't use the LaTeX grammar
""",
font="Arial", font_size=24,
# t2c is a dict that you can choose color for different text
t2c={"Text": BLUE, "TexText": BLUE, "LaTeX": ORANGE}
)
VGroup(text, difference).arrange(DOWN, buff=1)
self.play(Write(text))
self.play(FadeIn(difference, UP))
self.wait(3)
fonts = Text(
"And you can also set the font according to different words",
font="Arial",
t2f={"font": "Consolas", "words": "Consolas"},
t2c={"font": BLUE, "words": GREEN}
)
fonts.set_width(FRAME_WIDTH - 1)
slant = Text(
"And the same as slant and weight",
font="Consolas",
t2s={"slant": ITALIC},
t2w={"weight": BOLD},
t2c={"slant": ORANGE, "weight": RED}
)
VGroup(fonts, slant).arrange(DOWN, buff=0.8)
self.play(FadeOut(text), FadeOut(difference, shift=DOWN))
self.play(Write(fonts))
self.wait()
self.play(Write(slant))
self.wait()
class TexTransformExample(Scene):
def construct(self):
# Tex to color map
t2c = {
"A": BLUE,
"B": TEAL,
"C": GREEN,
}
# Configuration to pass along to each Tex mobject
kw = dict(font_size=72, t2c=t2c)
lines = VGroup(
Tex("A^2 + B^2 = C^2", **kw),
Tex("A^2 = C^2 - B^2", **kw),
Tex("A^2 = (C + B)(C - B)", **kw),
Tex(R"A = \sqrt{(C + B)(C - B)}", **kw),
)
lines.arrange(DOWN, buff=LARGE_BUFF)
self.add(lines[0])
# The animation TransformMatchingStrings will line up parts
# of the source and target which have matching substring strings.
# Here, giving it a little path_arc makes each part rotate into
# their final positions, which feels appropriate for the idea of
# rearranging an equation
self.play(
TransformMatchingStrings(
lines[0].copy(), lines[1],
# matched_keys specifies which substring should
# line up. If it's not specified, the animation
# will align the longest matching substrings.
# In this case, the substring "^2 = C^2" would
# trip it up
matched_keys=["A^2", "B^2", "C^2"],
# When you want a substring from the source
# to go to a non-equal substring from the target,
# use the key map.
key_map={"+": "-"},
path_arc=90 * DEGREES,
),
)
self.wait()
self.play(TransformMatchingStrings(
lines[1].copy(), lines[2],
matched_keys=["A^2"]
))
self.wait()
self.play(
TransformMatchingStrings(
lines[2].copy(), lines[3],
key_map={"2": R"\sqrt"},
path_arc=-30 * DEGREES,
),
)
self.wait(2)
self.play(LaggedStartMap(FadeOut, lines, shift=2 * RIGHT))
# TransformMatchingShapes will try to line up all pieces of a
# source mobject with those of a target, regardless of the
# what Mobject type they are.
source = Text("the morse code", height=1)
target = Text("here come dots", height=1)
saved_source = source.copy()
self.play(Write(source))
self.wait()
kw = dict(run_time=3, path_arc=PI / 2)
self.play(TransformMatchingShapes(source, target, **kw))
self.wait()
self.play(TransformMatchingShapes(target, saved_source, **kw))
self.wait()
class TexIndexing(Scene):
def construct(self):
# You can index into Tex mobject (or other StringMobjects) by substrings
equation = Tex(R"e^{\pi i} = -1", font_size=144)
self.add(equation)
self.play(FlashAround(equation["e"]))
self.wait()
self.play(Indicate(equation[R"\pi"]))
self.wait()
self.play(TransformFromCopy(
equation[R"e^{\pi i}"].copy().set_opacity(0.5),
equation["-1"],
path_arc=-PI / 2,
run_time=3
))
self.play(FadeOut(equation))
# Or regular expressions
equation = Tex("A^2 + B^2 = C^2", font_size=144)
self.play(Write(equation))
for part in equation[re.compile(r"\w\^2")]:
self.play(FlashAround(part))
self.wait()
self.play(FadeOut(equation))
# Indexing by substrings like this may not work when
# the order in which Latex draws symbols does not match
# the order in which they show up in the string.
# For example, here the infinity is drawn before the sigma
# so we don't get the desired behavior.
equation = Tex(R"\sum_{n = 1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}", font_size=72)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Doesn't hit the infinity
self.wait()
self.play(FadeOut(equation))
# However you can always fix this by explicitly passing in
# a string you might want to isolate later. Also, using
# \over instead of \frac helps to avoid the issue for fractions
equation = Tex(
R"\sum_{n = 1}^\infty {1 \over n^2} = {\pi^2 \over 6}",
# Explicitly mark "\infty" as a substring you might want to access
isolate=[R"\infty"],
font_size=72
)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Got it!
self.wait()
self.play(FadeOut(equation))
class UpdatersExample(Scene):
def construct(self):
square = Square()
square.set_fill(BLUE_E, 1)
# On all frames, the constructor Brace(square, UP) will
# be called, and the mobject brace will set its data to match
# that of the newly constructed object
brace = always_redraw(Brace, square, UP)
label = TexText("Width = 0.00")
number = label.make_number_changable("0.00")
# This ensures that the method deicmal.next_to(square)
# is called on every frame
always(label.next_to, brace, UP)
# You could also write the following equivalent line
# label.add_updater(lambda m: m.next_to(brace, UP))
# If the argument itself might change, you can use f_always,
# for which the arguments following the initial Mobject method
# should be functions returning arguments to that method.
# The following line ensures thst decimal.set_value(square.get_y())
# is called every frame
f_always(number.set_value, square.get_width)
# You could also write the following equivalent line
# number.add_updater(lambda m: m.set_value(square.get_width()))
self.add(square, brace, label)
# Notice that the brace and label track with the square
self.play(
square.animate.scale(2),
rate_func=there_and_back,
run_time=2,
)
self.wait()
self.play(
square.animate.set_width(5, stretch=True),
run_time=3,
)
self.wait()
self.play(
square.animate.set_width(2),
run_time=3
)
self.wait()
# In general, you can alway call Mobject.add_updater, and pass in
# a function that you want to be called on every frame. The function
# should take in either one argument, the mobject, or two arguments,
# the mobject and the amount of time since the last frame.
now = self.time
w0 = square.get_width()
square.add_updater(
lambda m: m.set_width(w0 * math.sin(self.time - now) + w0)
)
self.wait(4 * PI)
class CoordinateSystemExample(Scene):
def construct(self):
axes = Axes(
# x-axis ranges from -1 to 10, with a default step size of 1
x_range=(-1, 10),
# y-axis ranges from -2 to 2 with a step size of 0.5
y_range=(-2, 2, 0.5),
# The axes will be stretched so as to match the specified
# height and width
height=6,
width=10,
# Axes is made of two NumberLine mobjects. You can specify
# their configuration with axis_config
axis_config=dict(
stroke_color=GREY_A,
stroke_width=2,
numbers_to_exclude=[0],
),
# Alternatively, you can specify configuration for just one
# of them, like this.
y_axis_config=dict(
numbers_with_elongated_ticks=[-2, 2],
)
)
# Keyword arguments of add_coordinate_labels can be used to
# configure the DecimalNumber mobjects which it creates and
# adds to the axes
axes.add_coordinate_labels(
font_size=20,
num_decimal_places=1,
)
self.add(axes)
# Axes descends from the CoordinateSystem class, meaning
# you can call call axes.coords_to_point, abbreviated to
# axes.c2p, to associate a set of coordinates with a point,
# like so:
dot = Dot(color=RED)
dot.move_to(axes.c2p(0, 0))
self.play(FadeIn(dot, scale=0.5))
self.play(dot.animate.move_to(axes.c2p(3, 2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(5, 0.5)))
self.wait()
# Similarly, you can call axes.point_to_coords, or axes.p2c
# print(axes.p2c(dot.get_center()))
# We can draw lines from the axes to better mark the coordinates
# of a given point.
# Here, the always_redraw command means that on each new frame
# the lines will be redrawn
h_line = always_redraw(lambda: axes.get_h_line(dot.get_left()))
v_line = always_redraw(lambda: axes.get_v_line(dot.get_bottom()))
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
)
self.play(dot.animate.move_to(axes.c2p(3, -2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(1, 1)))
self.wait()
# If we tie the dot to a particular set of coordinates, notice
# that as we move the axes around it respects the coordinate
# system defined by them.
f_always(dot.move_to, lambda: axes.c2p(1, 1))
self.play(
axes.animate.scale(0.75).to_corner(UL),
run_time=2,
)
self.wait()
self.play(FadeOut(VGroup(axes, dot, h_line, v_line)))
# Other coordinate systems you can play around with include
# ThreeDAxes, NumberPlane, and ComplexPlane.
class GraphExample(Scene):
def construct(self):
axes = Axes((-3, 10), (-1, 8), height=6)
axes.add_coordinate_labels()
self.play(Write(axes, lag_ratio=0.01, run_time=1))
# Axes.get_graph will return the graph of a function
sin_graph = axes.get_graph(
lambda x: 2 * math.sin(x),
color=BLUE,
)
# By default, it draws it so as to somewhat smoothly interpolate
# between sampled points (x, f(x)). If the graph is meant to have
# a corner, though, you can set use_smoothing to False
relu_graph = axes.get_graph(
lambda x: max(x, 0),
use_smoothing=False,
color=YELLOW,
)
# For discontinuous functions, you can specify the point of
# discontinuity so that it does not try to draw over the gap.
step_graph = axes.get_graph(
lambda x: 2.0 if x > 3 else 1.0,
discontinuities=[3],
color=GREEN,
)
# Axes.get_graph_label takes in either a string or a mobject.
# If it's a string, it treats it as a LaTeX expression. By default
# it places the label next to the graph near the right side, and
# has it match the color of the graph
sin_label = axes.get_graph_label(sin_graph, "\\sin(x)")
relu_label = axes.get_graph_label(relu_graph, Text("ReLU"))
step_label = axes.get_graph_label(step_graph, Text("Step"), x=4)
self.play(
ShowCreation(sin_graph),
FadeIn(sin_label, RIGHT),
)
self.wait(2)
self.play(
ReplacementTransform(sin_graph, relu_graph),
FadeTransform(sin_label, relu_label),
)
self.wait()
self.play(
ReplacementTransform(relu_graph, step_graph),
FadeTransform(relu_label, step_label),
)
self.wait()
parabola = axes.get_graph(lambda x: 0.25 * x**2)
parabola.set_stroke(BLUE)
self.play(
FadeOut(step_graph),
FadeOut(step_label),
ShowCreation(parabola)
)
self.wait()
# You can use axes.input_to_graph_point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class TexAndNumbersExample(Scene):
def construct(self):
axes = Axes((-3, 3), (-3, 3), unit_size=1)
axes.to_edge(DOWN)
axes.add_coordinate_labels(font_size=16)
circle = Circle(radius=2)
circle.set_stroke(YELLOW, 3)
circle.move_to(axes.get_origin())
self.add(axes, circle)
# When numbers show up in tex, they can be readily
# replaced with DecimalMobjects so that methods like
# get_value and set_value can be called on them, and
# animations like ChangeDecimalToValue can be called
# on them.
tex = Tex("x^2 + y^2 = 4.00")
tex.next_to(axes, UP, buff=0.5)
value = tex.make_number_changable("4.00")
# This will tie the right hand side of our equation to
# the square of the radius of the circle
value.add_updater(lambda v: v.set_value(circle.get_radius()**2))
self.add(tex)
text = Text("""
You can manipulate numbers
in Tex mobjects
""", font_size=30)
text.next_to(tex, RIGHT, buff=1.5)
arrow = Arrow(text, tex)
self.add(text, arrow)
self.play(
circle.animate.set_height(2.0),
run_time=4,
rate_func=there_and_back,
)
# By default, tex.make_number_changable replaces the first occurance
# of the number,but by passing replace_all=True it replaces all and
# returns a group of the results
exponents = tex.make_number_changable("2", replace_all=True)
self.play(
LaggedStartMap(
FlashAround, exponents,
lag_ratio=0.2, buff=0.1, color=RED
),
exponents.animate.set_color(RED)
)
def func(x, y):
# Switch from manim coords to axes coords
xa, ya = axes.point_to_coords(np.array([x, y, 0]))
return xa**4 + ya**4 - 4
new_curve = ImplicitFunction(func)
new_curve.match_style(circle)
circle.rotate(angle_of_vector(new_curve.get_start())) # Align
value.clear_updaters()
self.play(
*(ChangeDecimalToValue(exp, 4) for exp in exponents),
ReplacementTransform(circle.copy(), new_curve),
circle.animate.set_stroke(width=1, opacity=0.5),
)
class SurfaceExample(ThreeDScene):
def construct(self):
surface_text = Text("For 3d scenes, try using surfaces")
surface_text.fix_in_frame()
surface_text.to_edge(UP)
self.add(surface_text)
self.wait(0.1)
torus1 = Torus(r1=1, r2=1)
torus2 = Torus(r1=3, r2=1)
sphere = Sphere(radius=3, resolution=torus1.resolution)
# You can texture a surface with up to two images, which will
# be interpreted as the side towards the light, and away from
# the light. These can be either urls, or paths to a local file
# in whatever you've set as the image directory in
# the custom_config.yml file
# day_texture = "EarthTextureMap"
# night_texture = "NightEarthTextureMap"
day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
surfaces = [
TexturedSurface(surface, day_texture, night_texture)
for surface in [sphere, torus1, torus2]
]
for mob in surfaces:
mob.shift(IN)
mob.mesh = SurfaceMesh(mob)
mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
surface = surfaces[0]
self.play(
FadeIn(surface),
ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
)
for mob in surfaces:
mob.add(mob.mesh)
surface.save_state()
self.play(Rotate(surface, PI / 2), run_time=2)
for mob in surfaces[1:]:
mob.rotate(PI / 2)
self.play(
Transform(surface, surfaces[1]),
run_time=3
)
self.play(
Transform(surface, surfaces[2]),
# Move camera frame during the transition
self.frame.animate.increment_phi(-10 * DEGREES),
self.frame.animate.increment_theta(-20 * DEGREES),
run_time=3
)
# Add ambient rotation
self.frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
# Play around with where the light is
light_text = Text("You can move around the light source")
light_text.move_to(surface_text)
light_text.fix_in_frame()
self.play(FadeTransform(surface_text, light_text))
light = self.camera.light_source
self.add(light)
light.save_state()
self.play(light.animate.move_to(3 * IN), run_time=5)
self.play(light.animate.shift(10 * OUT), run_time=5)
drag_text = Text("Try moving the mouse while pressing d or f")
drag_text.move_to(light_text)
drag_text.fix_in_frame()
self.play(FadeTransform(light_text, drag_text))
self.wait()
class InteractiveDevelopment(Scene):
def construct(self):
circle = Circle()
circle.set_fill(BLUE, opacity=0.5)
circle.set_stroke(BLUE_E, width=4)
square = Square()
self.play(ShowCreation(square))
self.wait()
# This opens an iPython terminal where you can keep writing
# lines as if they were part of this construct method.
# In particular, 'square', 'circle' and 'self' will all be
# part of the local namespace in that terminal.
self.embed()
# Try copying and pasting some of the lines below into
# the interactive shell
self.play(ReplacementTransform(square, circle))
self.wait()
self.play(circle.animate.stretch(4, 0))
self.play(Rotate(circle, 90 * DEGREES))
self.play(circle.animate.shift(2 * RIGHT).scale(0.25))
text = Text("""
In general, using the interactive shell
is very helpful when developing new scenes
""")
self.play(Write(text))
# In the interactive shell, you can just type
# play, add, remove, clear, wait, save_state and restore,
# instead of self.play, self.add, self.remove, etc.
# To interact with the window, type touch(). You can then
# scroll in the window, or zoom by holding down 'z' while scrolling,
# and change camera perspective by holding down 'd' while moving
# the mouse. Press 'r' to reset to the standard camera position.
# Press 'q' to stop interacting with the window and go back to
# typing new commands into the shell.
# In principle you can customize a scene to be responsive to
# mouse and keyboard interactions
always(circle.move_to, self.mouse_point)
class ControlsExample(Scene):
drag_to_pan = False
def setup(self):
self.textbox = Textbox()
self.checkbox = Checkbox()
self.color_picker = ColorSliders()
self.panel = ControlPanel(
Text("Text", font_size=24), self.textbox, Line(),
Text("Show/Hide Text", font_size=24), self.checkbox, Line(),
Text("Color of Text", font_size=24), self.color_picker
)
self.add(self.panel)
def construct(self):
text = Text("text", font_size=96)
def text_updater(old_text):
assert(isinstance(old_text, Text))
new_text = Text(self.textbox.get_value(), font_size=old_text.font_size)
# new_text.align_data_and_family(old_text)
new_text.move_to(old_text)
if self.checkbox.get_value():
|
else:
new_text.set_opacity(0)
old_text.become(new_text)
text.add_updater(text_updater)
self.add(MotionMobject(text))
self.textbox.set_value("Manim")
# self.wait(60)
# self.embed()
# See https://github.com/3b1b/videos for many, many more
| new_text.set_fill(
color=self.color_picker.get_picked_color(),
opacity=self.color_picker.get_picked_opacity()
) | conditional_block |
example_scenes.py | from manimlib import *
import numpy as np
# To watch one of these scenes, run the following:
# manimgl example_scenes.py OpeningManimExample
# Use -s to skip to the end and just save the final frame
# Use -w to write the animation to a file
# Use -o to write it to a file and open it once done
# Use -n <number> to skip ahead to the n'th animation of a scene.
class OpeningManimExample(Scene):
def construct(self):
intro_words = Text("""
The original motivation for manim was to
better illustrate mathematical functions
as transformations.
""")
intro_words.to_edge(UP)
self.play(Write(intro_words))
self.wait(2)
# Linear transform
grid = NumberPlane((-10, 10), (-5, 5))
matrix = [[1, 1], [0, 1]]
linear_transform_words = VGroup(
Text("This is what the matrix"),
IntegerMatrix(matrix, include_background_rectangle=True),
Text("looks like")
)
linear_transform_words.arrange(RIGHT)
linear_transform_words.to_edge(UP)
linear_transform_words.set_backstroke(width=5)
self.play(
ShowCreation(grid),
FadeTransform(intro_words, linear_transform_words)
)
self.wait()
self.play(grid.animate.apply_matrix(matrix), run_time=3)
self.wait()
# Complex map
c_grid = ComplexPlane()
moving_c_grid = c_grid.copy()
moving_c_grid.prepare_for_nonlinear_transform()
c_grid.set_stroke(BLUE_E, 1)
c_grid.add_coordinate_labels(font_size=24)
complex_map_words = TexText("""
Or thinking of the plane as $\\mathds{C}$,\\\\
this is the map $z \\rightarrow z^2$
""")
complex_map_words.to_corner(UR)
complex_map_words.set_backstroke(width=5)
self.play(
FadeOut(grid),
Write(c_grid, run_time=3),
FadeIn(moving_c_grid),
FadeTransform(linear_transform_words, complex_map_words),
)
self.wait()
self.play(
moving_c_grid.animate.apply_complex_function(lambda z: z**2),
run_time=6,
)
self.wait(2)
class AnimatingMethods(Scene):
def construct(self):
grid = Tex(R"\pi").get_grid(10, 10, height=4)
self.add(grid)
# You can animate the application of mobject methods with the
# ".animate" syntax:
self.play(grid.animate.shift(LEFT))
# Both of those will interpolate between the mobject's initial
# state and whatever happens when you apply that method.
# For this example, calling grid.shift(LEFT) would shift the
# grid one unit to the left, but both of the previous calls to
# "self.play" animate that motion.
# The same applies for any method, including those setting colors.
self.play(grid.animate.set_color(YELLOW))
self.wait()
self.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN))
self.wait()
self.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
self.wait()
# The method Mobject.apply_complex_function lets you apply arbitrary
# complex functions, treating the points defining the mobject as
# complex numbers.
self.play(grid.animate.apply_complex_function(np.exp), run_time=5)
self.wait()
# Even more generally, you could apply Mobject.apply_function,
# which takes in functions form R^3 to R^3
self.play(
grid.animate.apply_function(
lambda p: [
p[0] + 0.5 * math.sin(p[1]),
p[1] + 0.5 * math.sin(p[0]),
p[2]
]
),
run_time=5,
)
self.wait()
class TextExample(Scene):
def construct(self):
# To run this scene properly, you should have "Consolas" font in your computer
# for full usage, you can see https://github.com/3b1b/manim/pull/680
text = Text("Here is a text", font="Consolas", font_size=90)
difference = Text(
"""
The most important difference between Text and TexText is that\n
you can change the font more easily, but can't use the LaTeX grammar
""",
font="Arial", font_size=24,
# t2c is a dict that you can choose color for different text
t2c={"Text": BLUE, "TexText": BLUE, "LaTeX": ORANGE}
)
VGroup(text, difference).arrange(DOWN, buff=1)
self.play(Write(text))
self.play(FadeIn(difference, UP))
self.wait(3)
fonts = Text(
"And you can also set the font according to different words",
font="Arial",
t2f={"font": "Consolas", "words": "Consolas"},
t2c={"font": BLUE, "words": GREEN}
)
fonts.set_width(FRAME_WIDTH - 1)
slant = Text(
"And the same as slant and weight",
font="Consolas",
t2s={"slant": ITALIC},
t2w={"weight": BOLD},
t2c={"slant": ORANGE, "weight": RED}
)
VGroup(fonts, slant).arrange(DOWN, buff=0.8)
self.play(FadeOut(text), FadeOut(difference, shift=DOWN))
self.play(Write(fonts))
self.wait()
self.play(Write(slant))
self.wait()
class TexTransformExample(Scene):
def construct(self):
# Tex to color map
t2c = {
"A": BLUE,
"B": TEAL,
"C": GREEN,
}
# Configuration to pass along to each Tex mobject
kw = dict(font_size=72, t2c=t2c)
lines = VGroup(
Tex("A^2 + B^2 = C^2", **kw),
Tex("A^2 = C^2 - B^2", **kw),
Tex("A^2 = (C + B)(C - B)", **kw),
Tex(R"A = \sqrt{(C + B)(C - B)}", **kw),
)
lines.arrange(DOWN, buff=LARGE_BUFF)
self.add(lines[0])
# The animation TransformMatchingStrings will line up parts
# of the source and target which have matching substring strings.
# Here, giving it a little path_arc makes each part rotate into
# their final positions, which feels appropriate for the idea of
# rearranging an equation
self.play(
TransformMatchingStrings(
lines[0].copy(), lines[1],
# matched_keys specifies which substring should
# line up. If it's not specified, the animation
# will align the longest matching substrings.
# In this case, the substring "^2 = C^2" would
# trip it up
matched_keys=["A^2", "B^2", "C^2"],
# When you want a substring from the source
# to go to a non-equal substring from the target,
# use the key map.
key_map={"+": "-"},
path_arc=90 * DEGREES,
),
)
self.wait()
self.play(TransformMatchingStrings(
lines[1].copy(), lines[2],
matched_keys=["A^2"]
))
self.wait()
self.play(
TransformMatchingStrings(
lines[2].copy(), lines[3],
key_map={"2": R"\sqrt"},
path_arc=-30 * DEGREES,
),
)
self.wait(2)
self.play(LaggedStartMap(FadeOut, lines, shift=2 * RIGHT))
# TransformMatchingShapes will try to line up all pieces of a
# source mobject with those of a target, regardless of the
# what Mobject type they are.
source = Text("the morse code", height=1)
target = Text("here come dots", height=1)
saved_source = source.copy()
self.play(Write(source))
self.wait()
kw = dict(run_time=3, path_arc=PI / 2)
self.play(TransformMatchingShapes(source, target, **kw))
self.wait()
self.play(TransformMatchingShapes(target, saved_source, **kw))
self.wait()
class TexIndexing(Scene):
def construct(self):
# You can index into Tex mobject (or other StringMobjects) by substrings
equation = Tex(R"e^{\pi i} = -1", font_size=144)
self.add(equation)
self.play(FlashAround(equation["e"]))
self.wait()
self.play(Indicate(equation[R"\pi"]))
self.wait()
self.play(TransformFromCopy(
equation[R"e^{\pi i}"].copy().set_opacity(0.5),
equation["-1"],
path_arc=-PI / 2,
run_time=3
))
self.play(FadeOut(equation))
# Or regular expressions
equation = Tex("A^2 + B^2 = C^2", font_size=144)
self.play(Write(equation))
for part in equation[re.compile(r"\w\^2")]:
self.play(FlashAround(part))
self.wait()
self.play(FadeOut(equation))
# Indexing by substrings like this may not work when
# the order in which Latex draws symbols does not match
# the order in which they show up in the string.
# For example, here the infinity is drawn before the sigma
# so we don't get the desired behavior.
equation = Tex(R"\sum_{n = 1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}", font_size=72)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Doesn't hit the infinity
self.wait()
self.play(FadeOut(equation))
# However you can always fix this by explicitly passing in
# a string you might want to isolate later. Also, using
# \over instead of \frac helps to avoid the issue for fractions
equation = Tex(
R"\sum_{n = 1}^\infty {1 \over n^2} = {\pi^2 \over 6}",
# Explicitly mark "\infty" as a substring you might want to access
isolate=[R"\infty"],
font_size=72
)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Got it!
self.wait()
self.play(FadeOut(equation))
class UpdatersExample(Scene):
def construct(self):
square = Square()
square.set_fill(BLUE_E, 1)
# On all frames, the constructor Brace(square, UP) will
# be called, and the mobject brace will set its data to match
# that of the newly constructed object
brace = always_redraw(Brace, square, UP)
label = TexText("Width = 0.00")
number = label.make_number_changable("0.00")
# This ensures that the method deicmal.next_to(square)
# is called on every frame
always(label.next_to, brace, UP)
# You could also write the following equivalent line
# label.add_updater(lambda m: m.next_to(brace, UP))
# If the argument itself might change, you can use f_always,
# for which the arguments following the initial Mobject method
# should be functions returning arguments to that method.
# The following line ensures thst decimal.set_value(square.get_y())
# is called every frame
f_always(number.set_value, square.get_width)
# You could also write the following equivalent line
# number.add_updater(lambda m: m.set_value(square.get_width()))
self.add(square, brace, label)
# Notice that the brace and label track with the square
self.play(
square.animate.scale(2),
rate_func=there_and_back,
run_time=2,
)
self.wait()
self.play(
square.animate.set_width(5, stretch=True),
run_time=3,
)
self.wait()
self.play(
square.animate.set_width(2),
run_time=3
)
self.wait()
# In general, you can alway call Mobject.add_updater, and pass in
# a function that you want to be called on every frame. The function
# should take in either one argument, the mobject, or two arguments,
# the mobject and the amount of time since the last frame.
now = self.time
w0 = square.get_width()
square.add_updater(
lambda m: m.set_width(w0 * math.sin(self.time - now) + w0)
)
self.wait(4 * PI)
class CoordinateSystemExample(Scene):
def construct(self):
axes = Axes(
# x-axis ranges from -1 to 10, with a default step size of 1
x_range=(-1, 10),
# y-axis ranges from -2 to 2 with a step size of 0.5
y_range=(-2, 2, 0.5),
# The axes will be stretched so as to match the specified
# height and width
height=6,
width=10,
# Axes is made of two NumberLine mobjects. You can specify
# their configuration with axis_config
axis_config=dict(
stroke_color=GREY_A,
stroke_width=2,
numbers_to_exclude=[0],
),
# Alternatively, you can specify configuration for just one
# of them, like this.
y_axis_config=dict(
numbers_with_elongated_ticks=[-2, 2],
)
)
# Keyword arguments of add_coordinate_labels can be used to
# configure the DecimalNumber mobjects which it creates and
# adds to the axes
axes.add_coordinate_labels(
font_size=20,
num_decimal_places=1,
)
self.add(axes)
# Axes descends from the CoordinateSystem class, meaning
# you can call call axes.coords_to_point, abbreviated to
# axes.c2p, to associate a set of coordinates with a point,
# like so:
dot = Dot(color=RED)
dot.move_to(axes.c2p(0, 0))
self.play(FadeIn(dot, scale=0.5))
self.play(dot.animate.move_to(axes.c2p(3, 2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(5, 0.5)))
self.wait()
# Similarly, you can call axes.point_to_coords, or axes.p2c
# print(axes.p2c(dot.get_center()))
# We can draw lines from the axes to better mark the coordinates
# of a given point.
# Here, the always_redraw command means that on each new frame
# the lines will be redrawn
h_line = always_redraw(lambda: axes.get_h_line(dot.get_left()))
v_line = always_redraw(lambda: axes.get_v_line(dot.get_bottom()))
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
)
self.play(dot.animate.move_to(axes.c2p(3, -2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(1, 1)))
self.wait()
# If we tie the dot to a particular set of coordinates, notice
# that as we move the axes around it respects the coordinate
# system defined by them.
f_always(dot.move_to, lambda: axes.c2p(1, 1))
self.play(
axes.animate.scale(0.75).to_corner(UL),
run_time=2,
)
self.wait()
self.play(FadeOut(VGroup(axes, dot, h_line, v_line)))
# Other coordinate systems you can play around with include
# ThreeDAxes, NumberPlane, and ComplexPlane.
class GraphExample(Scene):
def construct(self):
axes = Axes((-3, 10), (-1, 8), height=6)
axes.add_coordinate_labels()
self.play(Write(axes, lag_ratio=0.01, run_time=1))
# Axes.get_graph will return the graph of a function
sin_graph = axes.get_graph(
lambda x: 2 * math.sin(x),
color=BLUE,
)
# By default, it draws it so as to somewhat smoothly interpolate
# between sampled points (x, f(x)). If the graph is meant to have
# a corner, though, you can set use_smoothing to False
relu_graph = axes.get_graph(
lambda x: max(x, 0),
use_smoothing=False,
color=YELLOW,
)
# For discontinuous functions, you can specify the point of
# discontinuity so that it does not try to draw over the gap.
step_graph = axes.get_graph(
lambda x: 2.0 if x > 3 else 1.0,
discontinuities=[3],
color=GREEN,
)
# Axes.get_graph_label takes in either a string or a mobject.
# If it's a string, it treats it as a LaTeX expression. By default
# it places the label next to the graph near the right side, and
# has it match the color of the graph
sin_label = axes.get_graph_label(sin_graph, "\\sin(x)")
relu_label = axes.get_graph_label(relu_graph, Text("ReLU"))
step_label = axes.get_graph_label(step_graph, Text("Step"), x=4)
self.play(
ShowCreation(sin_graph),
FadeIn(sin_label, RIGHT),
)
self.wait(2)
self.play(
ReplacementTransform(sin_graph, relu_graph),
FadeTransform(sin_label, relu_label),
)
self.wait()
self.play(
ReplacementTransform(relu_graph, step_graph),
FadeTransform(relu_label, step_label),
)
self.wait()
parabola = axes.get_graph(lambda x: 0.25 * x**2)
parabola.set_stroke(BLUE)
self.play(
FadeOut(step_graph),
FadeOut(step_label),
ShowCreation(parabola)
)
self.wait()
# You can use axes.input_to_graph_point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class TexAndNumbersExample(Scene):
def construct(self):
axes = Axes((-3, 3), (-3, 3), unit_size=1)
axes.to_edge(DOWN)
axes.add_coordinate_labels(font_size=16)
circle = Circle(radius=2)
circle.set_stroke(YELLOW, 3)
circle.move_to(axes.get_origin())
self.add(axes, circle)
# When numbers show up in tex, they can be readily
# replaced with DecimalMobjects so that methods like
# get_value and set_value can be called on them, and
# animations like ChangeDecimalToValue can be called
# on them.
tex = Tex("x^2 + y^2 = 4.00")
tex.next_to(axes, UP, buff=0.5)
value = tex.make_number_changable("4.00")
# This will tie the right hand side of our equation to
# the square of the radius of the circle
value.add_updater(lambda v: v.set_value(circle.get_radius()**2))
self.add(tex)
text = Text("""
You can manipulate numbers
in Tex mobjects
""", font_size=30)
text.next_to(tex, RIGHT, buff=1.5)
arrow = Arrow(text, tex)
self.add(text, arrow)
self.play(
circle.animate.set_height(2.0),
run_time=4,
rate_func=there_and_back,
)
# By default, tex.make_number_changable replaces the first occurance
# of the number,but by passing replace_all=True it replaces all and
# returns a group of the results
exponents = tex.make_number_changable("2", replace_all=True)
self.play(
LaggedStartMap(
FlashAround, exponents,
lag_ratio=0.2, buff=0.1, color=RED
),
exponents.animate.set_color(RED)
)
def | (x, y):
# Switch from manim coords to axes coords
xa, ya = axes.point_to_coords(np.array([x, y, 0]))
return xa**4 + ya**4 - 4
new_curve = ImplicitFunction(func)
new_curve.match_style(circle)
circle.rotate(angle_of_vector(new_curve.get_start())) # Align
value.clear_updaters()
self.play(
*(ChangeDecimalToValue(exp, 4) for exp in exponents),
ReplacementTransform(circle.copy(), new_curve),
circle.animate.set_stroke(width=1, opacity=0.5),
)
class SurfaceExample(ThreeDScene):
def construct(self):
surface_text = Text("For 3d scenes, try using surfaces")
surface_text.fix_in_frame()
surface_text.to_edge(UP)
self.add(surface_text)
self.wait(0.1)
torus1 = Torus(r1=1, r2=1)
torus2 = Torus(r1=3, r2=1)
sphere = Sphere(radius=3, resolution=torus1.resolution)
# You can texture a surface with up to two images, which will
# be interpreted as the side towards the light, and away from
# the light. These can be either urls, or paths to a local file
# in whatever you've set as the image directory in
# the custom_config.yml file
# day_texture = "EarthTextureMap"
# night_texture = "NightEarthTextureMap"
day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
surfaces = [
TexturedSurface(surface, day_texture, night_texture)
for surface in [sphere, torus1, torus2]
]
for mob in surfaces:
mob.shift(IN)
mob.mesh = SurfaceMesh(mob)
mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
surface = surfaces[0]
self.play(
FadeIn(surface),
ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
)
for mob in surfaces:
mob.add(mob.mesh)
surface.save_state()
self.play(Rotate(surface, PI / 2), run_time=2)
for mob in surfaces[1:]:
mob.rotate(PI / 2)
self.play(
Transform(surface, surfaces[1]),
run_time=3
)
self.play(
Transform(surface, surfaces[2]),
# Move camera frame during the transition
self.frame.animate.increment_phi(-10 * DEGREES),
self.frame.animate.increment_theta(-20 * DEGREES),
run_time=3
)
# Add ambient rotation
self.frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
# Play around with where the light is
light_text = Text("You can move around the light source")
light_text.move_to(surface_text)
light_text.fix_in_frame()
self.play(FadeTransform(surface_text, light_text))
light = self.camera.light_source
self.add(light)
light.save_state()
self.play(light.animate.move_to(3 * IN), run_time=5)
self.play(light.animate.shift(10 * OUT), run_time=5)
drag_text = Text("Try moving the mouse while pressing d or f")
drag_text.move_to(light_text)
drag_text.fix_in_frame()
self.play(FadeTransform(light_text, drag_text))
self.wait()
class InteractiveDevelopment(Scene):
def construct(self):
circle = Circle()
circle.set_fill(BLUE, opacity=0.5)
circle.set_stroke(BLUE_E, width=4)
square = Square()
self.play(ShowCreation(square))
self.wait()
# This opens an iPython terminal where you can keep writing
# lines as if they were part of this construct method.
# In particular, 'square', 'circle' and 'self' will all be
# part of the local namespace in that terminal.
self.embed()
# Try copying and pasting some of the lines below into
# the interactive shell
self.play(ReplacementTransform(square, circle))
self.wait()
self.play(circle.animate.stretch(4, 0))
self.play(Rotate(circle, 90 * DEGREES))
self.play(circle.animate.shift(2 * RIGHT).scale(0.25))
text = Text("""
In general, using the interactive shell
is very helpful when developing new scenes
""")
self.play(Write(text))
# In the interactive shell, you can just type
# play, add, remove, clear, wait, save_state and restore,
# instead of self.play, self.add, self.remove, etc.
# To interact with the window, type touch(). You can then
# scroll in the window, or zoom by holding down 'z' while scrolling,
# and change camera perspective by holding down 'd' while moving
# the mouse. Press 'r' to reset to the standard camera position.
# Press 'q' to stop interacting with the window and go back to
# typing new commands into the shell.
# In principle you can customize a scene to be responsive to
# mouse and keyboard interactions
always(circle.move_to, self.mouse_point)
class ControlsExample(Scene):
drag_to_pan = False
def setup(self):
self.textbox = Textbox()
self.checkbox = Checkbox()
self.color_picker = ColorSliders()
self.panel = ControlPanel(
Text("Text", font_size=24), self.textbox, Line(),
Text("Show/Hide Text", font_size=24), self.checkbox, Line(),
Text("Color of Text", font_size=24), self.color_picker
)
self.add(self.panel)
def construct(self):
text = Text("text", font_size=96)
def text_updater(old_text):
assert(isinstance(old_text, Text))
new_text = Text(self.textbox.get_value(), font_size=old_text.font_size)
# new_text.align_data_and_family(old_text)
new_text.move_to(old_text)
if self.checkbox.get_value():
new_text.set_fill(
color=self.color_picker.get_picked_color(),
opacity=self.color_picker.get_picked_opacity()
)
else:
new_text.set_opacity(0)
old_text.become(new_text)
text.add_updater(text_updater)
self.add(MotionMobject(text))
self.textbox.set_value("Manim")
# self.wait(60)
# self.embed()
# See https://github.com/3b1b/videos for many, many more
| func | identifier_name |
Skycam.py | from __future__ import division
from math import sin, cos, acos, asin, degrees, pi
import serial
from time import sleep
import numpy as np
from scipy.interpolate import splprep, splev, splrep
class Skycam:
''' Represents entire 3-node and camera system. Contains methods to calculate
and initialize paths, control the camera, connect and send serial commands.
'''
def __init__(self, a, b, c, zB, zC, cam):
''' Intialize new Skycam with calculated node positions, camera position,
in path-controlled mode.
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
zB: (float) height of point B
zC: (float) height of point C
cam: (tuple of floats) initial position of camera
'''
self.node0, self.node1, self.node2 = self.calc_nodes(a, b, c, zB, zC)
self.cam = cam
self.direct = False
self.pause = False
self.save_point = 0
def calc_nodes(self, a, b, c, zB, zC):
''' Calculate the positions of Skycam nodes based on node distance measurements.
A is the origin, B is along the y-axis, C is the remaining point.
Sides are opposite their respective points:
a is BC, b is AC, c is AB
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
Returns:
(tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to
steps: (int) number of steps in which to complete the path
Returns:
Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
# create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2)
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
#TODO: Always mess around with this value
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def | (self, ustart, s, dl, tol=.01):
''' Perform a binary search to find parametrized location of point.
Inputs:
ustart: (float)
s: (spline object)
dl: (float)
tol: (float)
Returns:
Reassigns middle and endpoints of search
um: (float) midpoint of search
'''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
totl += distance(point, ipoint)
ipoint = point
return totl
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
to enable in path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, node2):
''' Factory function to create new path object, if it exists within boundary.
Inputs:
points: (list of tuples of floats) points that make up a path
node0, 1, 2: (tuple of floats) coordinates of nodes
Returns:
(Path) new initialized Path object
'''
#Check if any point lies outside boundary
for point in points:
if Path.boundary(node0, node1, node2, point):
return None
return Path(points, node0, node1, node2)
@staticmethod
def boundary(node0, node1, node2, point, offset=6, hbound=120):
''' Check if any given point lies outside the boundaries of our system.
Inputs:
node0, 1, 2: (tuple of floats)
point: (tuple of floats)
offset: (float) offset distance from nodes to define boundary triangle
hbound: (float) lower bound of z y-axis
Returns:
(bool) Whether point is outside boundary, prints which
'''
# Find midpoint of each side
mid_AB = tuple((node0[i] + node1[i])/2 for i in xrange(3))
mid_BC = tuple((node1[i] + node2[i])/2 for i in xrange(3))
mid_AC = tuple((node2[i] + node0[i])/2 for i in xrange(3))
# Find slope of line connecting point to opposite midpoint
m_A = tuple((mid - node)/distance(mid_BC, node0) for (mid, node) in zip(mid_BC, node0))
m_B = tuple((mid - node)/distance(mid_AC, node1) for (mid, node) in zip(mid_AC, node1))
m_C = tuple((mid - node)/distance(mid_AB, node2) for (mid, node) in zip(mid_AB, node2))
# Find offset node coordinates
new_0 = tuple(coord + slope*offset for (coord, slope) in zip(node0, m_A))
new_1 = tuple(coord + slope*offset for (coord, slope) in zip(node1, m_B))
new_2 = tuple(coord + slope*offset for (coord, slope) in zip(node2, m_C))
if point[2] < 0 or point[2] > hbound:
print 'Height of path out of bounds', point[2]
return True
elif point[0] < 0:
print "Path out of bounds of line AB", point[0]
return True
elif point[1] < (new_2[1]*point[0]/new_2[0]):
print "Path out of bounds of line AC", point[1]
return True
elif point[1] > (((new_2[1] - new_1[1])/new_2[0])*point[0] + new_1[1]):
print "Path out of bounds of line BC", point[1]
return True
else:
return False
def diff_calc(self, lens):
''' Return differences between subsequent spool lengths x100 for sending.
Input:
lens: (list of floats) lengths of node wires at any time
Returns:
(list of floats) differences between subsequent lengths*100
'''
return [int(80*(lens[ind+1] - lens[ind])) for ind in xrange(len(lens)-1)]
def distance(A, B):
''' Calculate the distance between two points.
Inputs:
A: (tuple of floats/ints) first point
B: (tuple of floats/ints) second point
Returns:
(float) distance between the points
'''
dx = A[0] - B[0]
dy = A[1] - B[1]
dz = A[2] - B[2]
return (dx**2 + dy**2 + dz**2)**.5 | binary_search | identifier_name |
Skycam.py | from __future__ import division
from math import sin, cos, acos, asin, degrees, pi
import serial
from time import sleep
import numpy as np
from scipy.interpolate import splprep, splev, splrep
class Skycam:
''' Represents entire 3-node and camera system. Contains methods to calculate
and initialize paths, control the camera, connect and send serial commands.
'''
def __init__(self, a, b, c, zB, zC, cam):
''' Intialize new Skycam with calculated node positions, camera position,
in path-controlled mode.
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
zB: (float) height of point B
zC: (float) height of point C
cam: (tuple of floats) initial position of camera
'''
self.node0, self.node1, self.node2 = self.calc_nodes(a, b, c, zB, zC)
self.cam = cam
self.direct = False
self.pause = False
self.save_point = 0
def calc_nodes(self, a, b, c, zB, zC):
''' Calculate the positions of Skycam nodes based on node distance measurements.
A is the origin, B is along the y-axis, C is the remaining point.
Sides are opposite their respective points:
a is BC, b is AC, c is AB
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
Returns:
(tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
|
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
#TODO: Always mess around with this value
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def binary_search(self, ustart, s, dl, tol=.01):
''' Perform a binary search to find parametrized location of point.
Inputs:
ustart: (float)
s: (spline object)
dl: (float)
tol: (float)
Returns:
Reassigns middle and endpoints of search
um: (float) midpoint of search
'''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
totl += distance(point, ipoint)
ipoint = point
return totl
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
to enable in path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, node2):
''' Factory function to create new path object, if it exists within boundary.
Inputs:
points: (list of tuples of floats) points that make up a path
node0, 1, 2: (tuple of floats) coordinates of nodes
Returns:
(Path) new initialized Path object
'''
#Check if any point lies outside boundary
for point in points:
if Path.boundary(node0, node1, node2, point):
return None
return Path(points, node0, node1, node2)
@staticmethod
def boundary(node0, node1, node2, point, offset=6, hbound=120):
''' Check if any given point lies outside the boundaries of our system.
Inputs:
node0, 1, 2: (tuple of floats)
point: (tuple of floats)
offset: (float) offset distance from nodes to define boundary triangle
hbound: (float) lower bound of z y-axis
Returns:
(bool) Whether point is outside boundary, prints which
'''
# Find midpoint of each side
mid_AB = tuple((node0[i] + node1[i])/2 for i in xrange(3))
mid_BC = tuple((node1[i] + node2[i])/2 for i in xrange(3))
mid_AC = tuple((node2[i] + node0[i])/2 for i in xrange(3))
# Find slope of line connecting point to opposite midpoint
m_A = tuple((mid - node)/distance(mid_BC, node0) for (mid, node) in zip(mid_BC, node0))
m_B = tuple((mid - node)/distance(mid_AC, node1) for (mid, node) in zip(mid_AC, node1))
m_C = tuple((mid - node)/distance(mid_AB, node2) for (mid, node) in zip(mid_AB, node2))
# Find offset node coordinates
new_0 = tuple(coord + slope*offset for (coord, slope) in zip(node0, m_A))
new_1 = tuple(coord + slope*offset for (coord, slope) in zip(node1, m_B))
new_2 = tuple(coord + slope*offset for (coord, slope) in zip(node2, m_C))
if point[2] < 0 or point[2] > hbound:
print 'Height of path out of bounds', point[2]
return True
elif point[0] < 0:
print "Path out of bounds of line AB", point[0]
return True
elif point[1] < (new_2[1]*point[0]/new_2[0]):
print "Path out of bounds of line AC", point[1]
return True
elif point[1] > (((new_2[1] - new_1[1])/new_2[0])*point[0] + new_1[1]):
print "Path out of bounds of line BC", point[1]
return True
else:
return False
def diff_calc(self, lens):
''' Return differences between subsequent spool lengths x100 for sending.
Input:
lens: (list of floats) lengths of node wires at any time
Returns:
(list of floats) differences between subsequent lengths*100
'''
return [int(80*(lens[ind+1] - lens[ind])) for ind in xrange(len(lens)-1)]
def distance(A, B):
''' Calculate the distance between two points.
Inputs:
A: (tuple of floats/ints) first point
B: (tuple of floats/ints) second point
Returns:
(float) distance between the points
'''
dx = A[0] - B[0]
dy = A[1] - B[1]
dz = A[2] - B[2]
return (dx**2 + dy**2 + dz**2)**.5 | ''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to
steps: (int) number of steps in which to complete the path
Returns:
Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
# create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2) | identifier_body |
Skycam.py | from __future__ import division
from math import sin, cos, acos, asin, degrees, pi
import serial
from time import sleep
import numpy as np
from scipy.interpolate import splprep, splev, splrep
class Skycam:
''' Represents entire 3-node and camera system. Contains methods to calculate
and initialize paths, control the camera, connect and send serial commands.
'''
def __init__(self, a, b, c, zB, zC, cam):
''' Intialize new Skycam with calculated node positions, camera position,
in path-controlled mode.
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
zB: (float) height of point B
zC: (float) height of point C
cam: (tuple of floats) initial position of camera
'''
self.node0, self.node1, self.node2 = self.calc_nodes(a, b, c, zB, zC)
self.cam = cam
self.direct = False
self.pause = False
self.save_point = 0
def calc_nodes(self, a, b, c, zB, zC):
''' Calculate the positions of Skycam nodes based on node distance measurements.
A is the origin, B is along the y-axis, C is the remaining point.
Sides are opposite their respective points:
a is BC, b is AC, c is AB
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
Returns:
(tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to
steps: (int) number of steps in which to complete the path
Returns:
Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
# create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2)
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
#TODO: Always mess around with this value
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def binary_search(self, ustart, s, dl, tol=.01):
''' Perform a binary search to find parametrized location of point.
Inputs:
ustart: (float)
s: (spline object)
dl: (float)
tol: (float)
Returns:
Reassigns middle and endpoints of search
um: (float) midpoint of search
'''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
|
return totl
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
to enable in path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, node2):
''' Factory function to create new path object, if it exists within boundary.
Inputs:
points: (list of tuples of floats) points that make up a path
node0, 1, 2: (tuple of floats) coordinates of nodes
Returns:
(Path) new initialized Path object
'''
#Check if any point lies outside boundary
for point in points:
if Path.boundary(node0, node1, node2, point):
return None
return Path(points, node0, node1, node2)
@staticmethod
def boundary(node0, node1, node2, point, offset=6, hbound=120):
''' Check if any given point lies outside the boundaries of our system.
Inputs:
node0, 1, 2: (tuple of floats)
point: (tuple of floats)
offset: (float) offset distance from nodes to define boundary triangle
hbound: (float) lower bound of z y-axis
Returns:
(bool) Whether point is outside boundary, prints which
'''
# Find midpoint of each side
mid_AB = tuple((node0[i] + node1[i])/2 for i in xrange(3))
mid_BC = tuple((node1[i] + node2[i])/2 for i in xrange(3))
mid_AC = tuple((node2[i] + node0[i])/2 for i in xrange(3))
# Find slope of line connecting point to opposite midpoint
m_A = tuple((mid - node)/distance(mid_BC, node0) for (mid, node) in zip(mid_BC, node0))
m_B = tuple((mid - node)/distance(mid_AC, node1) for (mid, node) in zip(mid_AC, node1))
m_C = tuple((mid - node)/distance(mid_AB, node2) for (mid, node) in zip(mid_AB, node2))
# Find offset node coordinates
new_0 = tuple(coord + slope*offset for (coord, slope) in zip(node0, m_A))
new_1 = tuple(coord + slope*offset for (coord, slope) in zip(node1, m_B))
new_2 = tuple(coord + slope*offset for (coord, slope) in zip(node2, m_C))
if point[2] < 0 or point[2] > hbound:
print 'Height of path out of bounds', point[2]
return True
elif point[0] < 0:
print "Path out of bounds of line AB", point[0]
return True
elif point[1] < (new_2[1]*point[0]/new_2[0]):
print "Path out of bounds of line AC", point[1]
return True
elif point[1] > (((new_2[1] - new_1[1])/new_2[0])*point[0] + new_1[1]):
print "Path out of bounds of line BC", point[1]
return True
else:
return False
def diff_calc(self, lens):
''' Return differences between subsequent spool lengths x100 for sending.
Input:
lens: (list of floats) lengths of node wires at any time
Returns:
(list of floats) differences between subsequent lengths*100
'''
return [int(80*(lens[ind+1] - lens[ind])) for ind in xrange(len(lens)-1)]
def distance(A, B):
''' Calculate the distance between two points.
Inputs:
A: (tuple of floats/ints) first point
B: (tuple of floats/ints) second point
Returns:
(float) distance between the points
'''
dx = A[0] - B[0]
dy = A[1] - B[1]
dz = A[2] - B[2]
return (dx**2 + dy**2 + dz**2)**.5 | totl += distance(point, ipoint)
ipoint = point | conditional_block |
Skycam.py | from __future__ import division
from math import sin, cos, acos, asin, degrees, pi
import serial
from time import sleep
import numpy as np
from scipy.interpolate import splprep, splev, splrep
class Skycam:
''' Represents entire 3-node and camera system. Contains methods to calculate
and initialize paths, control the camera, connect and send serial commands.
'''
def __init__(self, a, b, c, zB, zC, cam):
''' Intialize new Skycam with calculated node positions, camera position,
in path-controlled mode.
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
zB: (float) height of point B
zC: (float) height of point C
cam: (tuple of floats) initial position of camera
'''
self.node0, self.node1, self.node2 = self.calc_nodes(a, b, c, zB, zC)
self.cam = cam
self.direct = False
self.pause = False
self.save_point = 0
def calc_nodes(self, a, b, c, zB, zC):
''' Calculate the positions of Skycam nodes based on node distance measurements.
A is the origin, B is along the y-axis, C is the remaining point.
Sides are opposite their respective points:
a is BC, b is AC, c is AB
Inputs:
a: (float) length of side a
b: (float) length of side b
c: (float) length of side c
Returns:
(tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to | Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
# create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2)
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
#TODO: Always mess around with this value
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def binary_search(self, ustart, s, dl, tol=.01):
''' Perform a binary search to find parametrized location of point.
Inputs:
ustart: (float)
s: (spline object)
dl: (float)
tol: (float)
Returns:
Reassigns middle and endpoints of search
um: (float) midpoint of search
'''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
totl += distance(point, ipoint)
ipoint = point
return totl
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
to enable in path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, node2):
''' Factory function to create new path object, if it exists within boundary.
Inputs:
points: (list of tuples of floats) points that make up a path
node0, 1, 2: (tuple of floats) coordinates of nodes
Returns:
(Path) new initialized Path object
'''
#Check if any point lies outside boundary
for point in points:
if Path.boundary(node0, node1, node2, point):
return None
return Path(points, node0, node1, node2)
@staticmethod
def boundary(node0, node1, node2, point, offset=6, hbound=120):
''' Check if any given point lies outside the boundaries of our system.
Inputs:
node0, 1, 2: (tuple of floats)
point: (tuple of floats)
offset: (float) offset distance from nodes to define boundary triangle
hbound: (float) lower bound of z y-axis
Returns:
(bool) Whether point is outside boundary, prints which
'''
# Find midpoint of each side
mid_AB = tuple((node0[i] + node1[i])/2 for i in xrange(3))
mid_BC = tuple((node1[i] + node2[i])/2 for i in xrange(3))
mid_AC = tuple((node2[i] + node0[i])/2 for i in xrange(3))
# Find slope of line connecting point to opposite midpoint
m_A = tuple((mid - node)/distance(mid_BC, node0) for (mid, node) in zip(mid_BC, node0))
m_B = tuple((mid - node)/distance(mid_AC, node1) for (mid, node) in zip(mid_AC, node1))
m_C = tuple((mid - node)/distance(mid_AB, node2) for (mid, node) in zip(mid_AB, node2))
# Find offset node coordinates
new_0 = tuple(coord + slope*offset for (coord, slope) in zip(node0, m_A))
new_1 = tuple(coord + slope*offset for (coord, slope) in zip(node1, m_B))
new_2 = tuple(coord + slope*offset for (coord, slope) in zip(node2, m_C))
if point[2] < 0 or point[2] > hbound:
print 'Height of path out of bounds', point[2]
return True
elif point[0] < 0:
print "Path out of bounds of line AB", point[0]
return True
elif point[1] < (new_2[1]*point[0]/new_2[0]):
print "Path out of bounds of line AC", point[1]
return True
elif point[1] > (((new_2[1] - new_1[1])/new_2[0])*point[0] + new_1[1]):
print "Path out of bounds of line BC", point[1]
return True
else:
return False
def diff_calc(self, lens):
''' Return differences between subsequent spool lengths x100 for sending.
Input:
lens: (list of floats) lengths of node wires at any time
Returns:
(list of floats) differences between subsequent lengths*100
'''
return [int(80*(lens[ind+1] - lens[ind])) for ind in xrange(len(lens)-1)]
def distance(A, B):
''' Calculate the distance between two points.
Inputs:
A: (tuple of floats/ints) first point
B: (tuple of floats/ints) second point
Returns:
(float) distance between the points
'''
dx = A[0] - B[0]
dy = A[1] - B[1]
dz = A[2] - B[2]
return (dx**2 + dy**2 + dz**2)**.5 | steps: (int) number of steps in which to complete the path
Returns: | random_line_split |
kafka.go | // Copyright (c) 2019 AccelByte Inc. All Rights Reserved.
// This is licensed software from AccelByte Inc, for limitations
// and restrictions contact your company contract manager.
package messagebus
import (
"crypto/sha512"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/Shopify/sarama"
"github.com/sirupsen/logrus"
"time"
)
const saslScramAuth = "SASL-SCRAM"
var (
rtoError = errors.New("request time out in publish with response")
subscribeMap map[string]map[string]func(message *Message, err error)
publishResponseTimeout = time.Duration(10000)
topicTimeout = time.Duration(15000)
)
// KafkaClient wraps client's functionality for Kafka
type KafkaClient struct {
asyncProducer sarama.AsyncProducer
syncProducer sarama.SyncProducer
consumer sarama.Consumer
broker *sarama.Broker
realm string
}
type SecurityConfig struct {
AuthenticationType string
SASLUsername string
SASLPassword string
}
// PublishBuilderEntry defines data structure to use with Kafka client
type PublishBuilderEntry struct {
PublishBuilder
encoded []byte
err error
}
func (publishBuilderEntry *PublishBuilderEntry) ensureEncoded() {
if publishBuilderEntry.encoded == nil && publishBuilderEntry.err == nil {
if publishBuilderEntry.traceId == "" {
publishBuilderEntry.traceId = generateID()
}
if publishBuilderEntry.messageId == "" {
publishBuilderEntry.messageId = generateID()
}
publishBuilderEntry.encoded, publishBuilderEntry.err = json.Marshal(Message{
Message: publishBuilderEntry.message,
MessageType: publishBuilderEntry.messageType,
Service: publishBuilderEntry.service,
TraceId: publishBuilderEntry.traceId,
MessageId: publishBuilderEntry.messageId,
})
}
}
// Encode PublishBuilder into array of bytes
func (publishBuilderEntry *PublishBuilderEntry) Encode() ([]byte, error) {
publishBuilderEntry.ensureEncoded()
return publishBuilderEntry.encoded, publishBuilderEntry.err
}
// Length returns size of encoded value
func (publishBuilderEntry *PublishBuilderEntry) Length() int {
publishBuilderEntry.ensureEncoded()
return len(publishBuilderEntry.encoded)
}
// NewKafkaClient create a new instance of KafkaClient
func NewKafkaClient(brokerList []string, realm string) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, nil)
}
// NewKafkaClientWithAuthentication create a new instance of KafkaClient with Authentication
func NewKafkaClientWithAuthentication(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, securityConfig)
}
func initKafkaClient(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
config := sarama.NewConfig()
config.Version = sarama.V2_1_0_0
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(config, securityConfig)
}
// currently only support 1 message broker
broker := sarama.NewBroker(brokerList[0])
err := broker.Open(config)
if err != nil {
logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
}
}
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) | else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond); ; {
select {
case consumerMessage := <-consumer.Messages():
if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
continue
}
break
case err = <-consumer.Errors():
callback := publishBuilder.callback
callback(nil, err)
break
case <-timeout:
callback := publishBuilder.callback
callback(nil, rtoError)
break
}
}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break
}
}
}()
}
// createTopic create kafka topic
func createTopic(broker *sarama.Broker, topicName string) error {
topicDetail := &sarama.TopicDetail{}
topicDetail.NumPartitions = int32(1)
topicDetail.ReplicationFactor = int16(1)
topicDetail.ConfigEntries = make(map[string]*string)
topicDetails := make(map[string]*sarama.TopicDetail)
topicDetails[topicName] = topicDetail
request := sarama.CreateTopicsRequest{
Timeout: time.Second * 15,
TopicDetails: topicDetails,
}
_, err := broker.CreateTopics(&request)
return err
}
// deleteTopic delete kafka topic
func deleteTopic(broker *sarama.Broker, topicName string) error {
request := sarama.DeleteTopicsRequest{
Timeout: time.Second * topicTimeout,
Topics: []string{topicName},
}
_, err := broker.DeleteTopics(&request)
return err
}
// unmarshal unmarshal received message into message struct
func unmarshal(consumerMessage *sarama.ConsumerMessage) *Message {
var receivedMessage Message
err := json.Unmarshal(consumerMessage.Value, &receivedMessage)
if err != nil {
logrus.Error("unable to unmarshal message from consumer in kafka : ", err)
return &Message{}
}
return &receivedMessage
}
// registerCallback add callback to map with topic and message Type as a key
func registerCallback(topic, messageType string, callback func(message *Message, err error)) (isRegistered bool) {
if subscribeMap == nil {
subscribeMap = make(map[string]map[string]func(message *Message, err error))
}
if callbackMap, isTopic := subscribeMap[topic]; isTopic {
if _, isMsgType := callbackMap[messageType]; isMsgType {
return true
}
}
newCallbackMap := make(map[string]func(message *Message, err error))
newCallbackMap[messageType] = callback
subscribeMap[topic] = newCallbackMap
return false
}
// runCallback run callback function when receive a message
func runCallback(receivedMessage *Message, consumerMessage *sarama.ConsumerMessage) {
callback := subscribeMap[consumerMessage.Topic][receivedMessage.MessageType]
if callback == nil {
logrus.Error(fmt.Sprintf("callback not found for topic : %s, message type : %s", consumerMessage.Topic,
receivedMessage.MessageType))
return
}
go callback(&Message{
Topic: consumerMessage.Topic,
Message: receivedMessage.Message,
MessageType: receivedMessage.MessageType,
Service: receivedMessage.Service,
TraceId: receivedMessage.TraceId,
MessageId: receivedMessage.MessageId,
}, nil)
}
// processMessage process a message from kafka
func processMessage(consumerMessage *sarama.ConsumerMessage) {
receivedMessage := unmarshal(consumerMessage)
runCallback(receivedMessage, consumerMessage)
}
// processResponseMessage process a message from kafka for publish with responses function
func processResponseMessage(consumerMessage *sarama.ConsumerMessage, messageID string) bool {
receivedMessage := unmarshal(consumerMessage)
// check the request and response message ID should be equal
if messageID != receivedMessage.MessageId {
return false
}
runCallback(receivedMessage, consumerMessage)
return true
}
| {
logrus.Warn("topic and message type already registered")
} | conditional_block |
kafka.go | // Copyright (c) 2019 AccelByte Inc. All Rights Reserved.
// This is licensed software from AccelByte Inc, for limitations
// and restrictions contact your company contract manager.
package messagebus
import (
"crypto/sha512"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/Shopify/sarama"
"github.com/sirupsen/logrus"
"time"
)
const saslScramAuth = "SASL-SCRAM"
var (
rtoError = errors.New("request time out in publish with response")
subscribeMap map[string]map[string]func(message *Message, err error)
publishResponseTimeout = time.Duration(10000)
topicTimeout = time.Duration(15000)
)
// KafkaClient wraps client's functionality for Kafka
type KafkaClient struct {
asyncProducer sarama.AsyncProducer
syncProducer sarama.SyncProducer
consumer sarama.Consumer
broker *sarama.Broker
realm string
}
type SecurityConfig struct {
AuthenticationType string
SASLUsername string
SASLPassword string
}
// PublishBuilderEntry defines data structure to use with Kafka client
type PublishBuilderEntry struct {
PublishBuilder
encoded []byte
err error
}
func (publishBuilderEntry *PublishBuilderEntry) ensureEncoded() {
if publishBuilderEntry.encoded == nil && publishBuilderEntry.err == nil {
if publishBuilderEntry.traceId == "" {
publishBuilderEntry.traceId = generateID()
}
if publishBuilderEntry.messageId == "" {
publishBuilderEntry.messageId = generateID()
}
publishBuilderEntry.encoded, publishBuilderEntry.err = json.Marshal(Message{
Message: publishBuilderEntry.message,
MessageType: publishBuilderEntry.messageType,
Service: publishBuilderEntry.service,
TraceId: publishBuilderEntry.traceId,
MessageId: publishBuilderEntry.messageId,
})
}
}
// Encode PublishBuilder into array of bytes
func (publishBuilderEntry *PublishBuilderEntry) Encode() ([]byte, error) {
publishBuilderEntry.ensureEncoded()
return publishBuilderEntry.encoded, publishBuilderEntry.err
}
// Length returns size of encoded value
func (publishBuilderEntry *PublishBuilderEntry) Length() int {
publishBuilderEntry.ensureEncoded()
return len(publishBuilderEntry.encoded)
}
// NewKafkaClient create a new instance of KafkaClient
func NewKafkaClient(brokerList []string, realm string) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, nil)
}
// NewKafkaClientWithAuthentication create a new instance of KafkaClient with Authentication
func NewKafkaClientWithAuthentication(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, securityConfig)
}
func initKafkaClient(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
config := sarama.NewConfig()
config.Version = sarama.V2_1_0_0
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(config, securityConfig)
}
// currently only support 1 message broker
broker := sarama.NewBroker(brokerList[0])
err := broker.Open(config)
if err != nil {
logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
}, |
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) {
logrus.Warn("topic and message type already registered")
} else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond); ; {
select {
case consumerMessage := <-consumer.Messages():
if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
continue
}
break
case err = <-consumer.Errors():
callback := publishBuilder.callback
callback(nil, err)
break
case <-timeout:
callback := publishBuilder.callback
callback(nil, rtoError)
break
}
}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break
}
}
}()
}
// createTopic create kafka topic
func createTopic(broker *sarama.Broker, topicName string) error {
topicDetail := &sarama.TopicDetail{}
topicDetail.NumPartitions = int32(1)
topicDetail.ReplicationFactor = int16(1)
topicDetail.ConfigEntries = make(map[string]*string)
topicDetails := make(map[string]*sarama.TopicDetail)
topicDetails[topicName] = topicDetail
request := sarama.CreateTopicsRequest{
Timeout: time.Second * 15,
TopicDetails: topicDetails,
}
_, err := broker.CreateTopics(&request)
return err
}
// deleteTopic delete kafka topic
func deleteTopic(broker *sarama.Broker, topicName string) error {
request := sarama.DeleteTopicsRequest{
Timeout: time.Second * topicTimeout,
Topics: []string{topicName},
}
_, err := broker.DeleteTopics(&request)
return err
}
// unmarshal unmarshal received message into message struct
func unmarshal(consumerMessage *sarama.ConsumerMessage) *Message {
var receivedMessage Message
err := json.Unmarshal(consumerMessage.Value, &receivedMessage)
if err != nil {
logrus.Error("unable to unmarshal message from consumer in kafka : ", err)
return &Message{}
}
return &receivedMessage
}
// registerCallback add callback to map with topic and message Type as a key
func registerCallback(topic, messageType string, callback func(message *Message, err error)) (isRegistered bool) {
if subscribeMap == nil {
subscribeMap = make(map[string]map[string]func(message *Message, err error))
}
if callbackMap, isTopic := subscribeMap[topic]; isTopic {
if _, isMsgType := callbackMap[messageType]; isMsgType {
return true
}
}
newCallbackMap := make(map[string]func(message *Message, err error))
newCallbackMap[messageType] = callback
subscribeMap[topic] = newCallbackMap
return false
}
// runCallback run callback function when receive a message
func runCallback(receivedMessage *Message, consumerMessage *sarama.ConsumerMessage) {
callback := subscribeMap[consumerMessage.Topic][receivedMessage.MessageType]
if callback == nil {
logrus.Error(fmt.Sprintf("callback not found for topic : %s, message type : %s", consumerMessage.Topic,
receivedMessage.MessageType))
return
}
go callback(&Message{
Topic: consumerMessage.Topic,
Message: receivedMessage.Message,
MessageType: receivedMessage.MessageType,
Service: receivedMessage.Service,
TraceId: receivedMessage.TraceId,
MessageId: receivedMessage.MessageId,
}, nil)
}
// processMessage process a message from kafka
func processMessage(consumerMessage *sarama.ConsumerMessage) {
receivedMessage := unmarshal(consumerMessage)
runCallback(receivedMessage, consumerMessage)
}
// processResponseMessage process a message from kafka for publish with responses function
func processResponseMessage(consumerMessage *sarama.ConsumerMessage, messageID string) bool {
receivedMessage := unmarshal(consumerMessage)
// check the request and response message ID should be equal
if messageID != receivedMessage.MessageId {
return false
}
runCallback(receivedMessage, consumerMessage)
return true
} | Topic: constructTopic(client.realm, publishBuilder.topic),
}
} | random_line_split |
kafka.go | // Copyright (c) 2019 AccelByte Inc. All Rights Reserved.
// This is licensed software from AccelByte Inc, for limitations
// and restrictions contact your company contract manager.
package messagebus
import (
"crypto/sha512"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/Shopify/sarama"
"github.com/sirupsen/logrus"
"time"
)
const saslScramAuth = "SASL-SCRAM"
var (
rtoError = errors.New("request time out in publish with response")
subscribeMap map[string]map[string]func(message *Message, err error)
publishResponseTimeout = time.Duration(10000)
topicTimeout = time.Duration(15000)
)
// KafkaClient wraps client's functionality for Kafka
type KafkaClient struct {
asyncProducer sarama.AsyncProducer
syncProducer sarama.SyncProducer
consumer sarama.Consumer
broker *sarama.Broker
realm string
}
type SecurityConfig struct {
AuthenticationType string
SASLUsername string
SASLPassword string
}
// PublishBuilderEntry defines data structure to use with Kafka client
type PublishBuilderEntry struct {
PublishBuilder
encoded []byte
err error
}
func (publishBuilderEntry *PublishBuilderEntry) ensureEncoded() {
if publishBuilderEntry.encoded == nil && publishBuilderEntry.err == nil {
if publishBuilderEntry.traceId == "" {
publishBuilderEntry.traceId = generateID()
}
if publishBuilderEntry.messageId == "" {
publishBuilderEntry.messageId = generateID()
}
publishBuilderEntry.encoded, publishBuilderEntry.err = json.Marshal(Message{
Message: publishBuilderEntry.message,
MessageType: publishBuilderEntry.messageType,
Service: publishBuilderEntry.service,
TraceId: publishBuilderEntry.traceId,
MessageId: publishBuilderEntry.messageId,
})
}
}
// Encode PublishBuilder into array of bytes
func (publishBuilderEntry *PublishBuilderEntry) Encode() ([]byte, error) {
publishBuilderEntry.ensureEncoded()
return publishBuilderEntry.encoded, publishBuilderEntry.err
}
// Length returns size of encoded value
func (publishBuilderEntry *PublishBuilderEntry) Length() int {
publishBuilderEntry.ensureEncoded()
return len(publishBuilderEntry.encoded)
}
// NewKafkaClient create a new instance of KafkaClient
func NewKafkaClient(brokerList []string, realm string) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, nil)
}
// NewKafkaClientWithAuthentication create a new instance of KafkaClient with Authentication
func NewKafkaClientWithAuthentication(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, securityConfig)
}
func initKafkaClient(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
config := sarama.NewConfig()
config.Version = sarama.V2_1_0_0
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(config, securityConfig)
}
// currently only support 1 message broker
broker := sarama.NewBroker(brokerList[0])
err := broker.Open(config)
if err != nil {
logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
}
}
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) {
logrus.Warn("topic and message type already registered")
} else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond); ; {
select {
case consumerMessage := <-consumer.Messages():
if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
continue
}
break
case err = <-consumer.Errors():
callback := publishBuilder.callback
callback(nil, err)
break
case <-timeout:
callback := publishBuilder.callback
callback(nil, rtoError)
break
}
}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break
}
}
}()
}
// createTopic create kafka topic
func createTopic(broker *sarama.Broker, topicName string) error {
topicDetail := &sarama.TopicDetail{}
topicDetail.NumPartitions = int32(1)
topicDetail.ReplicationFactor = int16(1)
topicDetail.ConfigEntries = make(map[string]*string)
topicDetails := make(map[string]*sarama.TopicDetail)
topicDetails[topicName] = topicDetail
request := sarama.CreateTopicsRequest{
Timeout: time.Second * 15,
TopicDetails: topicDetails,
}
_, err := broker.CreateTopics(&request)
return err
}
// deleteTopic delete kafka topic
func | (broker *sarama.Broker, topicName string) error {
request := sarama.DeleteTopicsRequest{
Timeout: time.Second * topicTimeout,
Topics: []string{topicName},
}
_, err := broker.DeleteTopics(&request)
return err
}
// unmarshal unmarshal received message into message struct
func unmarshal(consumerMessage *sarama.ConsumerMessage) *Message {
var receivedMessage Message
err := json.Unmarshal(consumerMessage.Value, &receivedMessage)
if err != nil {
logrus.Error("unable to unmarshal message from consumer in kafka : ", err)
return &Message{}
}
return &receivedMessage
}
// registerCallback add callback to map with topic and message Type as a key
func registerCallback(topic, messageType string, callback func(message *Message, err error)) (isRegistered bool) {
if subscribeMap == nil {
subscribeMap = make(map[string]map[string]func(message *Message, err error))
}
if callbackMap, isTopic := subscribeMap[topic]; isTopic {
if _, isMsgType := callbackMap[messageType]; isMsgType {
return true
}
}
newCallbackMap := make(map[string]func(message *Message, err error))
newCallbackMap[messageType] = callback
subscribeMap[topic] = newCallbackMap
return false
}
// runCallback run callback function when receive a message
func runCallback(receivedMessage *Message, consumerMessage *sarama.ConsumerMessage) {
callback := subscribeMap[consumerMessage.Topic][receivedMessage.MessageType]
if callback == nil {
logrus.Error(fmt.Sprintf("callback not found for topic : %s, message type : %s", consumerMessage.Topic,
receivedMessage.MessageType))
return
}
go callback(&Message{
Topic: consumerMessage.Topic,
Message: receivedMessage.Message,
MessageType: receivedMessage.MessageType,
Service: receivedMessage.Service,
TraceId: receivedMessage.TraceId,
MessageId: receivedMessage.MessageId,
}, nil)
}
// processMessage process a message from kafka
func processMessage(consumerMessage *sarama.ConsumerMessage) {
receivedMessage := unmarshal(consumerMessage)
runCallback(receivedMessage, consumerMessage)
}
// processResponseMessage process a message from kafka for publish with responses function
func processResponseMessage(consumerMessage *sarama.ConsumerMessage, messageID string) bool {
receivedMessage := unmarshal(consumerMessage)
// check the request and response message ID should be equal
if messageID != receivedMessage.MessageId {
return false
}
runCallback(receivedMessage, consumerMessage)
return true
}
| deleteTopic | identifier_name |
kafka.go | // Copyright (c) 2019 AccelByte Inc. All Rights Reserved.
// This is licensed software from AccelByte Inc, for limitations
// and restrictions contact your company contract manager.
package messagebus
import (
"crypto/sha512"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/Shopify/sarama"
"github.com/sirupsen/logrus"
"time"
)
const saslScramAuth = "SASL-SCRAM"
var (
rtoError = errors.New("request time out in publish with response")
subscribeMap map[string]map[string]func(message *Message, err error)
publishResponseTimeout = time.Duration(10000)
topicTimeout = time.Duration(15000)
)
// KafkaClient wraps client's functionality for Kafka
type KafkaClient struct {
asyncProducer sarama.AsyncProducer
syncProducer sarama.SyncProducer
consumer sarama.Consumer
broker *sarama.Broker
realm string
}
type SecurityConfig struct {
AuthenticationType string
SASLUsername string
SASLPassword string
}
// PublishBuilderEntry defines data structure to use with Kafka client
type PublishBuilderEntry struct {
PublishBuilder
encoded []byte
err error
}
func (publishBuilderEntry *PublishBuilderEntry) ensureEncoded() {
if publishBuilderEntry.encoded == nil && publishBuilderEntry.err == nil {
if publishBuilderEntry.traceId == "" {
publishBuilderEntry.traceId = generateID()
}
if publishBuilderEntry.messageId == "" {
publishBuilderEntry.messageId = generateID()
}
publishBuilderEntry.encoded, publishBuilderEntry.err = json.Marshal(Message{
Message: publishBuilderEntry.message,
MessageType: publishBuilderEntry.messageType,
Service: publishBuilderEntry.service,
TraceId: publishBuilderEntry.traceId,
MessageId: publishBuilderEntry.messageId,
})
}
}
// Encode PublishBuilder into array of bytes
func (publishBuilderEntry *PublishBuilderEntry) Encode() ([]byte, error) {
publishBuilderEntry.ensureEncoded()
return publishBuilderEntry.encoded, publishBuilderEntry.err
}
// Length returns size of encoded value
func (publishBuilderEntry *PublishBuilderEntry) Length() int {
publishBuilderEntry.ensureEncoded()
return len(publishBuilderEntry.encoded)
}
// NewKafkaClient create a new instance of KafkaClient
func NewKafkaClient(brokerList []string, realm string) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, nil)
}
// NewKafkaClientWithAuthentication create a new instance of KafkaClient with Authentication
func NewKafkaClientWithAuthentication(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, securityConfig)
}
func initKafkaClient(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
config := sarama.NewConfig()
config.Version = sarama.V2_1_0_0
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(config, securityConfig)
}
// currently only support 1 message broker
broker := sarama.NewBroker(brokerList[0])
err := broker.Open(config)
if err != nil {
logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
}
}
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) {
logrus.Warn("topic and message type already registered")
} else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond); ; {
select {
case consumerMessage := <-consumer.Messages():
if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
continue
}
break
case err = <-consumer.Errors():
callback := publishBuilder.callback
callback(nil, err)
break
case <-timeout:
callback := publishBuilder.callback
callback(nil, rtoError)
break
}
}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break
}
}
}()
}
// createTopic create kafka topic
func createTopic(broker *sarama.Broker, topicName string) error {
topicDetail := &sarama.TopicDetail{}
topicDetail.NumPartitions = int32(1)
topicDetail.ReplicationFactor = int16(1)
topicDetail.ConfigEntries = make(map[string]*string)
topicDetails := make(map[string]*sarama.TopicDetail)
topicDetails[topicName] = topicDetail
request := sarama.CreateTopicsRequest{
Timeout: time.Second * 15,
TopicDetails: topicDetails,
}
_, err := broker.CreateTopics(&request)
return err
}
// deleteTopic delete kafka topic
func deleteTopic(broker *sarama.Broker, topicName string) error {
request := sarama.DeleteTopicsRequest{
Timeout: time.Second * topicTimeout,
Topics: []string{topicName},
}
_, err := broker.DeleteTopics(&request)
return err
}
// unmarshal unmarshal received message into message struct
func unmarshal(consumerMessage *sarama.ConsumerMessage) *Message {
var receivedMessage Message
err := json.Unmarshal(consumerMessage.Value, &receivedMessage)
if err != nil {
logrus.Error("unable to unmarshal message from consumer in kafka : ", err)
return &Message{}
}
return &receivedMessage
}
// registerCallback add callback to map with topic and message Type as a key
func registerCallback(topic, messageType string, callback func(message *Message, err error)) (isRegistered bool) |
// runCallback run callback function when receive a message
func runCallback(receivedMessage *Message, consumerMessage *sarama.ConsumerMessage) {
callback := subscribeMap[consumerMessage.Topic][receivedMessage.MessageType]
if callback == nil {
logrus.Error(fmt.Sprintf("callback not found for topic : %s, message type : %s", consumerMessage.Topic,
receivedMessage.MessageType))
return
}
go callback(&Message{
Topic: consumerMessage.Topic,
Message: receivedMessage.Message,
MessageType: receivedMessage.MessageType,
Service: receivedMessage.Service,
TraceId: receivedMessage.TraceId,
MessageId: receivedMessage.MessageId,
}, nil)
}
// processMessage process a message from kafka
func processMessage(consumerMessage *sarama.ConsumerMessage) {
receivedMessage := unmarshal(consumerMessage)
runCallback(receivedMessage, consumerMessage)
}
// processResponseMessage process a message from kafka for publish with responses function
func processResponseMessage(consumerMessage *sarama.ConsumerMessage, messageID string) bool {
receivedMessage := unmarshal(consumerMessage)
// check the request and response message ID should be equal
if messageID != receivedMessage.MessageId {
return false
}
runCallback(receivedMessage, consumerMessage)
return true
}
| {
if subscribeMap == nil {
subscribeMap = make(map[string]map[string]func(message *Message, err error))
}
if callbackMap, isTopic := subscribeMap[topic]; isTopic {
if _, isMsgType := callbackMap[messageType]; isMsgType {
return true
}
}
newCallbackMap := make(map[string]func(message *Message, err error))
newCallbackMap[messageType] = callback
subscribeMap[topic] = newCallbackMap
return false
} | identifier_body |
pathfinding.py | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import cv2
from pathfinder import pathfinder
import random
import time
import math
from multiprocessing import Process, Manager
import os
import argparse
from libtiff import TIFF
from osgeo import osr, ogr
# from tqdm import tqdm
import multiprocessing
DL = 1 # 1 道路
SX = 2 # 2 水系
FW = 3 # 3 房屋(居民用地)
JZYD = 4 # 4 建设用地
NT = 5 # 5 农田
LM = 6 # 6 林木
class1 = [5, 6]
class2 = [2]
class3 = [3, 1]
class4 = [4]
unique_tag = str(round(time.time()))[-5:]
def output_profile():
if not os.path.exists("output/"):
os.makedirs("output/")
output_file = open("output/profile.txt", 'w')
output_file.write("地图大小:100,100\n")
output_file.write("电压等级为:3\n")
output_file.write("起点为:0,0;终点为:100,100\n")
output_file.write("起点与终点的直线距离为:100\n")
output_file.write("规划路线总长为:100,共架设:10个塔基\n")
output_file.write("综合代价为:1000\n")
output_file.write("跨越水系次数:3\n")
output_file.write("跨越交通线次数:3\n")
output_file.close()
def road_extract(layer):
line_list = []
layer_shape = layer.GetExtent()
for i in range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
p_x = random.randint(0, X)
p_y = random.randint(0, Y)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def se_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, start, end, neigh_range, gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list:
cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
parser = argparse.ArgumentParser(description='电力寻路程序')
parser.add_argument("--gridMap", help="地图的路径", type=str)
parser.add_argument("--start", nargs="+", help="起点", type=int)
parser.add_argument("--end", nargs="+", help="终点", type=int)
parser.add_argument("-v", "--voltage", help="电压等级", type=int)
parser.add_argument("-b", "--buffer", help="搜索集大小", type=int)
parser.add_argument("-p", "--precision", help="搜索精确等级", type=int)
parser.add_argument("-r", "--road", help="道路SHP文件的路径", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7 | = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
print("读取TIFF文件中...")
try:
tif = TIFF.open(args.gridMap, mode='r') # 打開tiff文件進行讀取
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class1 = cv2.inRange(im, 4.9, 5.1) + cv2.inRange(im, 5.9, 6.1)
class2 = cv2.inRange(im, 1.9, 2.1)
class3 = cv2.inRange(im, 0.9, 1.1) + cv2.inRange(im, 2.9, 3.1)
print("正在生成各类地块预览图并保存...")
plt.figure(num='sketch', figsize=(16, 16))
plt.subplot(2, 2, 1) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class1') # 第一幅图片标题
plt.imshow(class1) # 绘制第一幅图片
plt.subplot(2, 2, 2) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class2') # 第一幅图片标题
plt.imshow(class2) # 绘制第一幅图片
plt.subplot(2, 2, 3) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class3') # 第一幅图片标题
plt.imshow(class3) # 绘制第一幅图片
plt.subplot(2, 2, 4) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class4') # 第一幅图片标题
plt.imshow(class4) # 绘制第一幅图片
plt.savefig("output/{}/preview_landtype_tag{}.png".format(unique_tag, unique_tag))
del class1, class2, class3, class4
print("正在生成背景预览图...")
plt.imsave("output/{}/background_tag{}.png".format(unique_tag, unique_tag), im)
background = cv2.imread("output/{}/background_tag{}.png".format(unique_tag, unique_tag))
background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
np.save("output/{}/sketch_tag{}.npy".format(unique_tag,unique_tag), im)
print("提取道路信息...")
driver = ogr.GetDriverByName("ESRI Shapefile")
filename = args.road
dataSource = driver.Open(filename, 0)
try:
layer = dataSource.GetLayer(0)
except:
print("输入的道路文件有误!")
roads = road_extract(layer)
print("共有{}条道路".format(len(roads)))
im.astype(int)
# processes = []
# for ver in range(6):
# processes.append(Process(target=run, args=(ver, start, end, neigh_range, im, background, openset_size, length_part, degree_delta, roads)))
# for ver in range(6):
# processes[ver].start()
# for ver in range(6):
# processes[ver].join()
# print('Process will start.')
# for ver in range(5):
# processes[ver].start()
# for ver in range(5):
# processes[ver].join()
# print('Process end.')
print("开始跑程序...")
count = 0
ver_count = 0
processes = []
# pbar = tqdm(total=4)
while True:
manager = Manager()
d = manager.dict()
for ver in range(ver_count, ver_count + 5):
p = Process(target=run, args=(
ver, d, start, end, neigh_range, im, background, openset_size, length_part, degree_delta,
roads))
processes.append(p)
p.start()
print("载入进程...")
for i in range(ver_count, ver_count + 5):
processes[i].join()
for result in d.values():
count = count + result
# if result==1:
# pbar.update(1)
ver_count = ver_count + 5
# print(d.keys())
# print("count大小:{}".format(count))
if count > 4:
# pbar.close()
break
print('Process end.')
print("结束")
| :
length_part = 20
degree_delta | conditional_block |
pathfinding.py | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import cv2
from pathfinder import pathfinder
import random
import time
import math
from multiprocessing import Process, Manager
import os
import argparse
from libtiff import TIFF
from osgeo import osr, ogr
# from tqdm import tqdm
import multiprocessing
DL = 1 # 1 道路
SX = 2 # 2 水系
FW = 3 # 3 房屋(居民用地)
JZYD = 4 # 4 建设用地
NT = 5 # 5 农田
LM = 6 # 6 林木
class1 = [5, 6]
class2 = [2]
class3 = [3, 1]
class4 = [4]
unique_tag = str(round(time.time()))[-5:]
def output_profile():
if not os.path.exists("output/"):
os.makedirs("output/")
output_file = open("output/profile.txt", 'w')
output_file.write("地图大小:100,100\n")
output_file.write("电压等级为:3\n")
output_file.write("起点为:0,0;终点为:100,100\n")
output_file.write("起点与终点的直线距离为:100\n")
output_file.write("规划路线总长为:100,共架设:10个塔基\n")
output_file.write("综合代价为:1000\n")
output_file.write("跨越水系次数:3\n")
output_file.write("跨越交通线次数:3\n")
output_file.close()
def road_extract(layer):
line_list = []
layer_shape = layer.GetExtent()
for i in range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
p_x = random.randint(0, X)
p_y = random.randint(0, Y)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def se_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, start, end, | , gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list:
cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
parser = argparse.ArgumentParser(description='电力寻路程序')
parser.add_argument("--gridMap", help="地图的路径", type=str)
parser.add_argument("--start", nargs="+", help="起点", type=int)
parser.add_argument("--end", nargs="+", help="终点", type=int)
parser.add_argument("-v", "--voltage", help="电压等级", type=int)
parser.add_argument("-b", "--buffer", help="搜索集大小", type=int)
parser.add_argument("-p", "--precision", help="搜索精确等级", type=int)
parser.add_argument("-r", "--road", help="道路SHP文件的路径", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7:
length_part = 20
degree_delta = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
print("读取TIFF文件中...")
try:
tif = TIFF.open(args.gridMap, mode='r') # 打開tiff文件進行讀取
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class1 = cv2.inRange(im, 4.9, 5.1) + cv2.inRange(im, 5.9, 6.1)
class2 = cv2.inRange(im, 1.9, 2.1)
class3 = cv2.inRange(im, 0.9, 1.1) + cv2.inRange(im, 2.9, 3.1)
print("正在生成各类地块预览图并保存...")
plt.figure(num='sketch', figsize=(16, 16))
plt.subplot(2, 2, 1) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class1') # 第一幅图片标题
plt.imshow(class1) # 绘制第一幅图片
plt.subplot(2, 2, 2) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class2') # 第一幅图片标题
plt.imshow(class2) # 绘制第一幅图片
plt.subplot(2, 2, 3) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class3') # 第一幅图片标题
plt.imshow(class3) # 绘制第一幅图片
plt.subplot(2, 2, 4) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class4') # 第一幅图片标题
plt.imshow(class4) # 绘制第一幅图片
plt.savefig("output/{}/preview_landtype_tag{}.png".format(unique_tag, unique_tag))
del class1, class2, class3, class4
print("正在生成背景预览图...")
plt.imsave("output/{}/background_tag{}.png".format(unique_tag, unique_tag), im)
background = cv2.imread("output/{}/background_tag{}.png".format(unique_tag, unique_tag))
background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
np.save("output/{}/sketch_tag{}.npy".format(unique_tag,unique_tag), im)
print("提取道路信息...")
driver = ogr.GetDriverByName("ESRI Shapefile")
filename = args.road
dataSource = driver.Open(filename, 0)
try:
layer = dataSource.GetLayer(0)
except:
print("输入的道路文件有误!")
roads = road_extract(layer)
print("共有{}条道路".format(len(roads)))
im.astype(int)
# processes = []
# for ver in range(6):
# processes.append(Process(target=run, args=(ver, start, end, neigh_range, im, background, openset_size, length_part, degree_delta, roads)))
# for ver in range(6):
# processes[ver].start()
# for ver in range(6):
# processes[ver].join()
# print('Process will start.')
# for ver in range(5):
# processes[ver].start()
# for ver in range(5):
# processes[ver].join()
# print('Process end.')
print("开始跑程序...")
count = 0
ver_count = 0
processes = []
# pbar = tqdm(total=4)
while True:
manager = Manager()
d = manager.dict()
for ver in range(ver_count, ver_count + 5):
p = Process(target=run, args=(
ver, d, start, end, neigh_range, im, background, openset_size, length_part, degree_delta,
roads))
processes.append(p)
p.start()
print("载入进程...")
for i in range(ver_count, ver_count + 5):
processes[i].join()
for result in d.values():
count = count + result
# if result==1:
# pbar.update(1)
ver_count = ver_count + 5
# print(d.keys())
# print("count大小:{}".format(count))
if count > 4:
# pbar.close()
break
print('Process end.')
print("结束")
| neigh_range | identifier_name |
pathfinding.py | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import cv2
from pathfinder import pathfinder
import random
import time
import math
from multiprocessing import Process, Manager
import os
import argparse
from libtiff import TIFF
from osgeo import osr, ogr
# from tqdm import tqdm
import multiprocessing
DL = 1 # 1 道路
SX = 2 # 2 水系
FW = 3 # 3 房屋(居民用地)
JZYD = 4 # 4 建设用地
NT = 5 # 5 农田
LM = 6 # 6 林木
class1 = [5, 6]
class2 = [2]
class3 = [3, 1]
class4 = [4]
unique_tag = str(round(time.time()))[-5:]
def output_profile():
if not os.path.exists("output/"):
os.makedirs("output/")
output_file = open("output/profile.txt", 'w')
output_file.write("地图大小:100,100\n")
output_file.write("电压等级为:3\n")
output_file.write("起点为:0,0;终点为:100,100\n")
output_file.write("起点与终点的直线距离为:100\n")
output_file.write("规划路线总长为:100,共架设:10个塔基\n")
output_file.write("综合代价为:1000\n")
output_file.write("跨越水系次数:3\n")
output_file.write("跨越交通线次数:3\n")
output_file.close()
def road_extract(layer):
line_list = []
layer_shape = layer.GetExtent()
for i in range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
p_x = random.randint(0, X)
p_y = random.randint(0, Y)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def s | t, end, neigh_range, gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list:
cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
parser = argparse.ArgumentParser(description='电力寻路程序')
parser.add_argument("--gridMap", help="地图的路径", type=str)
parser.add_argument("--start", nargs="+", help="起点", type=int)
parser.add_argument("--end", nargs="+", help="终点", type=int)
parser.add_argument("-v", "--voltage", help="电压等级", type=int)
parser.add_argument("-b", "--buffer", help="搜索集大小", type=int)
parser.add_argument("-p", "--precision", help="搜索精确等级", type=int)
parser.add_argument("-r", "--road", help="道路SHP文件的路径", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7:
length_part = 20
degree_delta = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
print("读取TIFF文件中...")
try:
tif = TIFF.open(args.gridMap, mode='r') # 打開tiff文件進行讀取
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class1 = cv2.inRange(im, 4.9, 5.1) + cv2.inRange(im, 5.9, 6.1)
class2 = cv2.inRange(im, 1.9, 2.1)
class3 = cv2.inRange(im, 0.9, 1.1) + cv2.inRange(im, 2.9, 3.1)
print("正在生成各类地块预览图并保存...")
plt.figure(num='sketch', figsize=(16, 16))
plt.subplot(2, 2, 1) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class1') # 第一幅图片标题
plt.imshow(class1) # 绘制第一幅图片
plt.subplot(2, 2, 2) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class2') # 第一幅图片标题
plt.imshow(class2) # 绘制第一幅图片
plt.subplot(2, 2, 3) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class3') # 第一幅图片标题
plt.imshow(class3) # 绘制第一幅图片
plt.subplot(2, 2, 4) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class4') # 第一幅图片标题
plt.imshow(class4) # 绘制第一幅图片
plt.savefig("output/{}/preview_landtype_tag{}.png".format(unique_tag, unique_tag))
del class1, class2, class3, class4
print("正在生成背景预览图...")
plt.imsave("output/{}/background_tag{}.png".format(unique_tag, unique_tag), im)
background = cv2.imread("output/{}/background_tag{}.png".format(unique_tag, unique_tag))
background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
np.save("output/{}/sketch_tag{}.npy".format(unique_tag,unique_tag), im)
print("提取道路信息...")
driver = ogr.GetDriverByName("ESRI Shapefile")
filename = args.road
dataSource = driver.Open(filename, 0)
try:
layer = dataSource.GetLayer(0)
except:
print("输入的道路文件有误!")
roads = road_extract(layer)
print("共有{}条道路".format(len(roads)))
im.astype(int)
# processes = []
# for ver in range(6):
# processes.append(Process(target=run, args=(ver, start, end, neigh_range, im, background, openset_size, length_part, degree_delta, roads)))
# for ver in range(6):
# processes[ver].start()
# for ver in range(6):
# processes[ver].join()
# print('Process will start.')
# for ver in range(5):
# processes[ver].start()
# for ver in range(5):
# processes[ver].join()
# print('Process end.')
print("开始跑程序...")
count = 0
ver_count = 0
processes = []
# pbar = tqdm(total=4)
while True:
manager = Manager()
d = manager.dict()
for ver in range(ver_count, ver_count + 5):
p = Process(target=run, args=(
ver, d, start, end, neigh_range, im, background, openset_size, length_part, degree_delta,
roads))
processes.append(p)
p.start()
print("载入进程...")
for i in range(ver_count, ver_count + 5):
processes[i].join()
for result in d.values():
count = count + result
# if result==1:
# pbar.update(1)
ver_count = ver_count + 5
# print(d.keys())
# print("count大小:{}".format(count))
if count > 4:
# pbar.close()
break
print('Process end.')
print("结束")
| e_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, star | identifier_body |
pathfinding.py | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import cv2
from pathfinder import pathfinder
import random
import time
import math
from multiprocessing import Process, Manager
import os
import argparse
from libtiff import TIFF
from osgeo import osr, ogr
# from tqdm import tqdm
import multiprocessing
DL = 1 # 1 道路
SX = 2 # 2 水系
FW = 3 # 3 房屋(居民用地)
JZYD = 4 # 4 建设用地
NT = 5 # 5 农田
LM = 6 # 6 林木
class1 = [5, 6]
class2 = [2]
class3 = [3, 1]
class4 = [4]
unique_tag = str(round(time.time()))[-5:]
def output_profile():
if not os.path.exists("output/"):
os.makedirs("output/")
output_file = open("output/profile.txt", 'w')
output_file.write("地图大小:100,100\n")
output_file.write("电压等级为:3\n")
output_file.write("起点为:0,0;终点为:100,100\n")
output_file.write("起点与终点的直线距离为:100\n")
output_file.write("规划路线总长为:100,共架设:10个塔基\n")
output_file.write("综合代价为:1000\n")
output_file.write("跨越水系次数:3\n")
output_file.write("跨越交通线次数:3\n")
output_file.close()
def road_extract(layer):
line_list = []
layer_shape = layer.GetExtent()
for i in range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
p_x = random.randint(0, X)
p_y = random.randint(0, Y)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def se_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, start, end, neigh_range, gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list: | cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
parser = argparse.ArgumentParser(description='电力寻路程序')
parser.add_argument("--gridMap", help="地图的路径", type=str)
parser.add_argument("--start", nargs="+", help="起点", type=int)
parser.add_argument("--end", nargs="+", help="终点", type=int)
parser.add_argument("-v", "--voltage", help="电压等级", type=int)
parser.add_argument("-b", "--buffer", help="搜索集大小", type=int)
parser.add_argument("-p", "--precision", help="搜索精确等级", type=int)
parser.add_argument("-r", "--road", help="道路SHP文件的路径", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7:
length_part = 20
degree_delta = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
print("读取TIFF文件中...")
try:
tif = TIFF.open(args.gridMap, mode='r') # 打開tiff文件進行讀取
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class1 = cv2.inRange(im, 4.9, 5.1) + cv2.inRange(im, 5.9, 6.1)
class2 = cv2.inRange(im, 1.9, 2.1)
class3 = cv2.inRange(im, 0.9, 1.1) + cv2.inRange(im, 2.9, 3.1)
print("正在生成各类地块预览图并保存...")
plt.figure(num='sketch', figsize=(16, 16))
plt.subplot(2, 2, 1) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class1') # 第一幅图片标题
plt.imshow(class1) # 绘制第一幅图片
plt.subplot(2, 2, 2) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class2') # 第一幅图片标题
plt.imshow(class2) # 绘制第一幅图片
plt.subplot(2, 2, 3) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class3') # 第一幅图片标题
plt.imshow(class3) # 绘制第一幅图片
plt.subplot(2, 2, 4) # 将窗口分为两行两列四个子图,则可显示四幅图片
plt.title('class4') # 第一幅图片标题
plt.imshow(class4) # 绘制第一幅图片
plt.savefig("output/{}/preview_landtype_tag{}.png".format(unique_tag, unique_tag))
del class1, class2, class3, class4
print("正在生成背景预览图...")
plt.imsave("output/{}/background_tag{}.png".format(unique_tag, unique_tag), im)
background = cv2.imread("output/{}/background_tag{}.png".format(unique_tag, unique_tag))
background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
np.save("output/{}/sketch_tag{}.npy".format(unique_tag,unique_tag), im)
print("提取道路信息...")
driver = ogr.GetDriverByName("ESRI Shapefile")
filename = args.road
dataSource = driver.Open(filename, 0)
try:
layer = dataSource.GetLayer(0)
except:
print("输入的道路文件有误!")
roads = road_extract(layer)
print("共有{}条道路".format(len(roads)))
im.astype(int)
# processes = []
# for ver in range(6):
# processes.append(Process(target=run, args=(ver, start, end, neigh_range, im, background, openset_size, length_part, degree_delta, roads)))
# for ver in range(6):
# processes[ver].start()
# for ver in range(6):
# processes[ver].join()
# print('Process will start.')
# for ver in range(5):
# processes[ver].start()
# for ver in range(5):
# processes[ver].join()
# print('Process end.')
print("开始跑程序...")
count = 0
ver_count = 0
processes = []
# pbar = tqdm(total=4)
while True:
manager = Manager()
d = manager.dict()
for ver in range(ver_count, ver_count + 5):
p = Process(target=run, args=(
ver, d, start, end, neigh_range, im, background, openset_size, length_part, degree_delta,
roads))
processes.append(p)
p.start()
print("载入进程...")
for i in range(ver_count, ver_count + 5):
processes[i].join()
for result in d.values():
count = count + result
# if result==1:
# pbar.update(1)
ver_count = ver_count + 5
# print(d.keys())
# print("count大小:{}".format(count))
if count > 4:
# pbar.close()
break
print('Process end.')
print("结束") | random_line_split | |
get_tips_nonlocal.py | #use the nonlocal topological method to detect tips.
# also records topologcially preserved values.
#Tim Tyree
#9.13.2021
from skimage import measure
from numba import jit, njit
from numba.typed import List
import numpy as np, os
from . import *
# from .intersection import *
from scipy.interpolate import interp2d
from .intersection import *
# from . import find_contours
# from ._utils_find_contours import *
# from ._utils_find_tips import *
# from ._find_tips import *
@njit#(cache=True)#, nogil = True)
def get_tips(contours_a,contours_b):
'''Must recieve contours that make no attempt to jump the boundaries
returns tips with indices of parent contours returned as the nested list, n_list.
tuple(contours_a),tuple(contours_b) are each tuples of m-by-2 np.ndarrays. m is any positive int.
each member is a 1D line.
get_tips returns all intersections of
contours_a with contours_b.
will throw a TypingError exception if either input tuple is empty.
if you get a nonsingular matrix error, make sure that you`re not comparing a contour to itself.'''
n_list = List(); x_list = List(); y_list = List();
ncr = len(contours_a); nci = len(contours_b)
for n1 in range(ncr):
for n2 in range(nci):
# for n1, c1 in enumerate(contours_a):
# for n2, c2 in enumerate(contours_b):
c1 = contours_a[n1]
c2 = contours_b[n2]
x1 = c1[:, 0]
y1 = c1[:, 1]
x2 = c2[:, 0]
y2 = c2[:, 1]
x,y = intersection(x1, y1, x2, y2)
if len(x)>0:
s = (n1,n2)
xl = list(x)
yl = list(y)
n_list.append(s)
x_list.append(xl)
y_list.append(yl)
return n_list, x_list, y_list
def enumerate_tips(tips):
'''returns n_list, x_list, y_list
gets tips into neat sorted python primitives'''
n_list = []; x_lst = []; y_lst = []
if len(tips)==0:
return None # [],[],[]
for n,q in enumerate(tips):
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
n_list.append(n)
x_lst.append(x)
y_lst.append(y)
return n_list, x_lst, y_lst
def list_tips(tips):
return tips_to_list(tips)
def tips_to_list(tips):
'''returns x_list, y_list
ets tips into neat sorted python primitives'''
x_lst = []; y_lst = []
if len(tips)==0:
return x_lst, y_lst#None # [],[]
for q in tips:
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
x_lst.append(x)
y_lst.append(y)
return x_lst, y_lst
def my_numba_list_to_python_list(numba_lst):
normal_list = []
for lst in numba_lst:
normal_list.append(list(lst))
return normal_list
@njit
def unpad_xy_position (position, pad_x, width, rejection_distance_x,
pad_y, height, rejection_distance_y):
x = unpad(X=position[0], pad=pad_x, width=width, rejection_distance=rejection_distance_x)
y = unpad(X=position[1], pad=pad_y, width=height, rejection_distance=rejection_distance_y)
return x,y
@njit
def unpad(X, pad, width, rejection_distance):
'''unpads 1 coordinate x or y for the padding:
[0... pad | pad ... width + pad | width + pad ... width + 2 * pad]
return -9999 if X is within rejection_distance of the edge,
return X if X is in [pad ... width + pad], which is if X is in the unpadded frame, which has width = width
else return X reflected onto the unpadded frame'''
P = rejection_distance
X -= pad
if X < -pad+P:
X = -9999 # throw out X later
elif X < 0:
X += width
if X > width+pad-P:
X = -9999 # throw out X later
elif X >= width:
X -= width
return X
# @njit
def textures_to_padded_textures(txt,dtexture_dt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
def matrices_to_padded_matrices(txt,dtexture_dt, pad):
'''txt and dtexture_dt are rank two tensors. i.e. the channel_no is 1.
large pad allows knots to be recorded right.
'''
# width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt, pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
# #informal test for ^that
# padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
# print(txt[0,0])
# print(padded_txt[...,2:5][pad,pad])
# @njit
def | (mat, pad, channel_no=3):
''''''
return np.pad(array = mat, pad_width = pad, mode = 'wrap')[...,pad:pad+channel_no]
# width, height = mat.shape[:2]
# padded_width = 512 + pad #pixels
# padded_mat = np.pad(array = mat, pad_width = pad, mode = 'wrap')
# return padded_mat[...,2:5]
# @njit
def pad_texture(txt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txta = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
padded_txtb = np.pad(array = txt[...,1], pad_width = pad, mode = 'wrap')
padded_txtc = np.pad(array = txt[...,2], pad_width = pad, mode = 'wrap')
# dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return np.array([padded_txta,padded_txtb,padded_txtc]).T
def map_pbc_tips_back(tips, pad, width, height, edge_tolerance, atol = 1e-11):
'''width and height are from the shape of the unpadded buffer.
TODO: get intersection to be njit compiled, then njit map_pbc_tips_back,
for which I'll need to return to using numba.typed.List() instead of [].'''
atol_squared = atol**2
min_dist_squared_init = width**2
s_tips, x_tips, y_tips = tips
s1_mapped_lst = []; s2_mapped_lst = [];
x_mapped_lst = []; y_mapped_lst = [];
# s1_mapped_lst = List(); s2_mapped_lst = List();
# x_mapped_lst = List(); y_mapped_lst = List();
for n, x in enumerate(x_tips):
y = y_tips[n]; s = s_tips[n]
S1, S2 = s_tips[n]
y = y_tips[n]
for X, Y in zip(x, y):
X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y)
lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2)
s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to onlyt get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np.nan,np.nan,np.nan]
# txt = np.stack([texture,texture,texture]).T
# print(
# get_state_nearest(x,y,txt)
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 3.5, kind='cubic')
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 0.5, kind='linear')
# )
##############################################
## Get Electrophysiological (EP) State Data #
##############################################
def get_states(x_values, y_values, txt, pad,
nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
'''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = txt
padded_txt = pad_matrix(txt, pad)
n_lst, x_lst, y_lst = tips_mapped
y_locations = np.array(flatten(x_lst))+pad#np.array(tips_mapped[2])
x_locations = np.array(flatten(y_lst))+pad#np.array(tips_mapped[3])
states_nearest = []; states_interpolated_linear = []; states_interpolated_cubic = [];
for x,y in zip(x_locations,y_locations):
state_nearest = get_state_nearest(x,y,txt=padded_txt)
state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 0.5, kind='linear')
state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 3.5, kind='cubic')
states_nearest.append(state_nearest)
states_interpolated_linear.append(state_interpolated_linear)
states_interpolated_cubic.append(state_interpolated_cubic)
return states_nearest, states_interpolated_linear, states_interpolated_cubic
def add_states(tips_mapped, states_EP):
tips_mapped = list(tips_mapped)
tips_mapped.extend(states_EP)
return tuple(tips_mapped)
def unwrap_EP(df,
EP_col_name = 'states_interpolated_linear',
drop_original_column=False):
'''If this function is slow, it may be a result of df[EP_col_name] containing strings.'''
EP_col_exists = EP_col_name in df.columns.values
if not EP_col_exists:
print(f"Caution! EP_col_name '{EP_col_exists}' does not exist. Returning input df.")
return df
else:
V_lst = []
f_lst = []
s_lst = []
for index, row in df.iterrows():
try:
V,f,s = row[EP_col_name]
except Exception as e:
V,f,s = eval(row[EP_col_name])
V_lst.append(V)
f_lst.append(f)
s_lst.append(s)
df['V'] = V_lst
df['f'] = f_lst
df['s'] = s_lst
df.drop(columns=[EP_col_name], inplace=True)
return df
@njit
def get_grad_direction(texture):
'''get the gradient direction field, N
out_Nx, out_Ny = get_grad_direction(texture)
'''
height, width = texture.shape
out_Nx = np.zeros_like(texture, dtype=np.float64)
out_Ny = np.zeros_like(texture, dtype=np.float64)
DX = 1/0.025; DY = 1/0.025;
for y in range(height):
for x in range(width):
up = _pbc(texture,y+1,x,height,width)
down = _pbc(texture,y-1,x,height,width)
left = _pbc(texture,y,x-1,height,width)
right = _pbc(texture,y,x+1,height,width)
Nx = (right-left)/DX
Ny = (up-down)/DY
norm = np.sqrt( Nx**2 + Ny**2 )
if norm == 0:
out_Nx[y,x] = -10.
out_Ny[y,x] = -10.
else:
out_Nx[y,x] = Nx/norm
out_Ny[y,x] = Ny/norm
return out_Nx, out_Ny
# ################################
# deprecated
# ################################
#deprecated - needs parameters
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.9,fully_connected='low',positive_orientation='low')
# return contours_raw,contours_inc
#tip locating for stable parameters
# img_inc = (img_nxt * ifilter(dtexture_dt[..., 0]))**2 #mask of instantaneously increasing voltages
# img_inc = filters.gaussian(img_inc,sigma=2., mode='wrap')
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# @jit
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# return contours_raw, contours_inc
# # @njit
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# x, y = intersection(x1, y1, x2, y2)
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# y = list(y)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# # tmp = intersection(x1, y1, x2, y2)
# x, y = intersection(x1, y1, x2, y2)
# # if a tip has been detected, save it and its contour ids
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# # x.sort()
# y = list(y)
# # y.sort()
# # tmp = (s,x,y)
# # tips.append(tmp)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_states(tips_mapped, txt, pad,
# nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
# '''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# # tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = pad_matrix(txt, pad)
# y_locations = np.array(tips_mapped[2]) + pad
# x_locations = np.array(tips_mapped[3]) + pad
#
# states_nearest = states_interpolated_linear = states_interpolated_cubic = [];
# for x,y in zip(x_locations,y_locations):
# state_nearest = get_state_nearest(x,y,txt=padded_txt)
# state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 0.5, kind='linear')
# state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 3.5, kind='cubic')
# states_nearest.append(state_nearest)
# states_interpolated_linear.append(state_interpolated_linear)
# states_interpolated_cubic.append(state_interpolated_cubic)
# return states_nearest, states_interpolated_linear, states_interpolated_cubic
| pad_matrix | identifier_name |
get_tips_nonlocal.py | #use the nonlocal topological method to detect tips.
# also records topologcially preserved values.
#Tim Tyree
#9.13.2021
from skimage import measure
from numba import jit, njit
from numba.typed import List
import numpy as np, os
from . import *
# from .intersection import *
from scipy.interpolate import interp2d
from .intersection import *
# from . import find_contours
# from ._utils_find_contours import *
# from ._utils_find_tips import *
# from ._find_tips import *
@njit#(cache=True)#, nogil = True)
def get_tips(contours_a,contours_b):
'''Must recieve contours that make no attempt to jump the boundaries
returns tips with indices of parent contours returned as the nested list, n_list.
tuple(contours_a),tuple(contours_b) are each tuples of m-by-2 np.ndarrays. m is any positive int.
each member is a 1D line.
get_tips returns all intersections of
contours_a with contours_b.
will throw a TypingError exception if either input tuple is empty.
if you get a nonsingular matrix error, make sure that you`re not comparing a contour to itself.'''
n_list = List(); x_list = List(); y_list = List();
ncr = len(contours_a); nci = len(contours_b)
for n1 in range(ncr):
for n2 in range(nci):
# for n1, c1 in enumerate(contours_a):
# for n2, c2 in enumerate(contours_b):
c1 = contours_a[n1]
c2 = contours_b[n2]
x1 = c1[:, 0]
y1 = c1[:, 1]
x2 = c2[:, 0]
y2 = c2[:, 1]
x,y = intersection(x1, y1, x2, y2)
if len(x)>0:
s = (n1,n2)
xl = list(x)
yl = list(y)
n_list.append(s)
x_list.append(xl)
y_list.append(yl)
return n_list, x_list, y_list
def enumerate_tips(tips):
'''returns n_list, x_list, y_list
gets tips into neat sorted python primitives'''
n_list = []; x_lst = []; y_lst = []
if len(tips)==0:
return None # [],[],[]
for n,q in enumerate(tips):
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
n_list.append(n)
x_lst.append(x)
y_lst.append(y)
return n_list, x_lst, y_lst
def list_tips(tips):
return tips_to_list(tips)
def tips_to_list(tips):
'''returns x_list, y_list
ets tips into neat sorted python primitives'''
x_lst = []; y_lst = []
if len(tips)==0:
return x_lst, y_lst#None # [],[]
for q in tips:
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
x_lst.append(x)
y_lst.append(y)
return x_lst, y_lst
def my_numba_list_to_python_list(numba_lst):
normal_list = []
for lst in numba_lst:
normal_list.append(list(lst))
return normal_list
@njit
def unpad_xy_position (position, pad_x, width, rejection_distance_x,
pad_y, height, rejection_distance_y):
x = unpad(X=position[0], pad=pad_x, width=width, rejection_distance=rejection_distance_x)
y = unpad(X=position[1], pad=pad_y, width=height, rejection_distance=rejection_distance_y)
return x,y
@njit
def unpad(X, pad, width, rejection_distance):
'''unpads 1 coordinate x or y for the padding:
[0... pad | pad ... width + pad | width + pad ... width + 2 * pad]
return -9999 if X is within rejection_distance of the edge,
return X if X is in [pad ... width + pad], which is if X is in the unpadded frame, which has width = width
else return X reflected onto the unpadded frame'''
P = rejection_distance
X -= pad
if X < -pad+P:
X = -9999 # throw out X later
elif X < 0:
X += width
if X > width+pad-P:
X = -9999 # throw out X later
elif X >= width:
X -= width
return X
# @njit
def textures_to_padded_textures(txt,dtexture_dt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
def matrices_to_padded_matrices(txt,dtexture_dt, pad):
'''txt and dtexture_dt are rank two tensors. i.e. the channel_no is 1.
large pad allows knots to be recorded right.
'''
# width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt, pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
# #informal test for ^that
# padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
# print(txt[0,0])
# print(padded_txt[...,2:5][pad,pad])
# @njit
def pad_matrix(mat, pad, channel_no=3):
''''''
return np.pad(array = mat, pad_width = pad, mode = 'wrap')[...,pad:pad+channel_no]
# width, height = mat.shape[:2]
# padded_width = 512 + pad #pixels
# padded_mat = np.pad(array = mat, pad_width = pad, mode = 'wrap')
# return padded_mat[...,2:5]
# @njit
def pad_texture(txt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txta = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
padded_txtb = np.pad(array = txt[...,1], pad_width = pad, mode = 'wrap')
padded_txtc = np.pad(array = txt[...,2], pad_width = pad, mode = 'wrap')
# dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return np.array([padded_txta,padded_txtb,padded_txtc]).T
def map_pbc_tips_back(tips, pad, width, height, edge_tolerance, atol = 1e-11):
'''width and height are from the shape of the unpadded buffer.
TODO: get intersection to be njit compiled, then njit map_pbc_tips_back,
for which I'll need to return to using numba.typed.List() instead of [].'''
atol_squared = atol**2
min_dist_squared_init = width**2
s_tips, x_tips, y_tips = tips
s1_mapped_lst = []; s2_mapped_lst = [];
x_mapped_lst = []; y_mapped_lst = [];
# s1_mapped_lst = List(); s2_mapped_lst = List();
# x_mapped_lst = List(); y_mapped_lst = List();
for n, x in enumerate(x_tips):
y = y_tips[n]; s = s_tips[n]
S1, S2 = s_tips[n]
y = y_tips[n]
for X, Y in zip(x, y):
X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y)
lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2)
s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to onlyt get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np.nan,np.nan,np.nan]
# txt = np.stack([texture,texture,texture]).T
# print(
# get_state_nearest(x,y,txt)
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 3.5, kind='cubic')
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 0.5, kind='linear')
# )
##############################################
## Get Electrophysiological (EP) State Data #
##############################################
def get_states(x_values, y_values, txt, pad,
nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
'''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = txt
padded_txt = pad_matrix(txt, pad)
n_lst, x_lst, y_lst = tips_mapped
y_locations = np.array(flatten(x_lst))+pad#np.array(tips_mapped[2])
x_locations = np.array(flatten(y_lst))+pad#np.array(tips_mapped[3])
states_nearest = []; states_interpolated_linear = []; states_interpolated_cubic = [];
for x,y in zip(x_locations,y_locations):
state_nearest = get_state_nearest(x,y,txt=padded_txt)
state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 0.5, kind='linear')
state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 3.5, kind='cubic')
states_nearest.append(state_nearest)
states_interpolated_linear.append(state_interpolated_linear)
states_interpolated_cubic.append(state_interpolated_cubic)
return states_nearest, states_interpolated_linear, states_interpolated_cubic
def add_states(tips_mapped, states_EP):
tips_mapped = list(tips_mapped)
tips_mapped.extend(states_EP)
return tuple(tips_mapped)
def unwrap_EP(df,
EP_col_name = 'states_interpolated_linear',
drop_original_column=False):
'''If this function is slow, it may be a result of df[EP_col_name] containing strings.'''
EP_col_exists = EP_col_name in df.columns.values
if not EP_col_exists:
print(f"Caution! EP_col_name '{EP_col_exists}' does not exist. Returning input df.")
return df
else:
V_lst = []
f_lst = []
s_lst = []
for index, row in df.iterrows():
try:
V,f,s = row[EP_col_name]
except Exception as e:
V,f,s = eval(row[EP_col_name])
V_lst.append(V)
f_lst.append(f)
s_lst.append(s)
df['V'] = V_lst
df['f'] = f_lst
df['s'] = s_lst
df.drop(columns=[EP_col_name], inplace=True)
return df
@njit
def get_grad_direction(texture):
|
# ################################
# deprecated
# ################################
#deprecated - needs parameters
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.9,fully_connected='low',positive_orientation='low')
# return contours_raw,contours_inc
#tip locating for stable parameters
# img_inc = (img_nxt * ifilter(dtexture_dt[..., 0]))**2 #mask of instantaneously increasing voltages
# img_inc = filters.gaussian(img_inc,sigma=2., mode='wrap')
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# @jit
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# return contours_raw, contours_inc
# # @njit
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# x, y = intersection(x1, y1, x2, y2)
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# y = list(y)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# # tmp = intersection(x1, y1, x2, y2)
# x, y = intersection(x1, y1, x2, y2)
# # if a tip has been detected, save it and its contour ids
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# # x.sort()
# y = list(y)
# # y.sort()
# # tmp = (s,x,y)
# # tips.append(tmp)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_states(tips_mapped, txt, pad,
# nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
# '''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# # tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = pad_matrix(txt, pad)
# y_locations = np.array(tips_mapped[2]) + pad
# x_locations = np.array(tips_mapped[3]) + pad
#
# states_nearest = states_interpolated_linear = states_interpolated_cubic = [];
# for x,y in zip(x_locations,y_locations):
# state_nearest = get_state_nearest(x,y,txt=padded_txt)
# state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 0.5, kind='linear')
# state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 3.5, kind='cubic')
# states_nearest.append(state_nearest)
# states_interpolated_linear.append(state_interpolated_linear)
# states_interpolated_cubic.append(state_interpolated_cubic)
# return states_nearest, states_interpolated_linear, states_interpolated_cubic
| '''get the gradient direction field, N
out_Nx, out_Ny = get_grad_direction(texture)
'''
height, width = texture.shape
out_Nx = np.zeros_like(texture, dtype=np.float64)
out_Ny = np.zeros_like(texture, dtype=np.float64)
DX = 1/0.025; DY = 1/0.025;
for y in range(height):
for x in range(width):
up = _pbc(texture,y+1,x,height,width)
down = _pbc(texture,y-1,x,height,width)
left = _pbc(texture,y,x-1,height,width)
right = _pbc(texture,y,x+1,height,width)
Nx = (right-left)/DX
Ny = (up-down)/DY
norm = np.sqrt( Nx**2 + Ny**2 )
if norm == 0:
out_Nx[y,x] = -10.
out_Ny[y,x] = -10.
else:
out_Nx[y,x] = Nx/norm
out_Ny[y,x] = Ny/norm
return out_Nx, out_Ny | identifier_body |
get_tips_nonlocal.py | #use the nonlocal topological method to detect tips.
# also records topologcially preserved values.
#Tim Tyree
#9.13.2021
from skimage import measure
from numba import jit, njit
from numba.typed import List
import numpy as np, os
from . import *
# from .intersection import *
from scipy.interpolate import interp2d
from .intersection import *
# from . import find_contours
# from ._utils_find_contours import *
# from ._utils_find_tips import *
# from ._find_tips import *
@njit#(cache=True)#, nogil = True)
def get_tips(contours_a,contours_b):
'''Must recieve contours that make no attempt to jump the boundaries
returns tips with indices of parent contours returned as the nested list, n_list.
tuple(contours_a),tuple(contours_b) are each tuples of m-by-2 np.ndarrays. m is any positive int.
each member is a 1D line.
get_tips returns all intersections of
contours_a with contours_b.
will throw a TypingError exception if either input tuple is empty.
if you get a nonsingular matrix error, make sure that you`re not comparing a contour to itself.'''
n_list = List(); x_list = List(); y_list = List();
ncr = len(contours_a); nci = len(contours_b)
for n1 in range(ncr):
for n2 in range(nci):
# for n1, c1 in enumerate(contours_a):
# for n2, c2 in enumerate(contours_b):
c1 = contours_a[n1]
c2 = contours_b[n2]
x1 = c1[:, 0]
y1 = c1[:, 1]
x2 = c2[:, 0]
y2 = c2[:, 1]
x,y = intersection(x1, y1, x2, y2)
if len(x)>0:
s = (n1,n2)
xl = list(x)
yl = list(y)
n_list.append(s)
x_list.append(xl)
y_list.append(yl)
return n_list, x_list, y_list
def enumerate_tips(tips):
'''returns n_list, x_list, y_list
gets tips into neat sorted python primitives'''
n_list = []; x_lst = []; y_lst = []
if len(tips)==0:
return None # [],[],[]
for n,q in enumerate(tips):
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
n_list.append(n)
x_lst.append(x)
y_lst.append(y)
return n_list, x_lst, y_lst
def list_tips(tips):
return tips_to_list(tips)
def tips_to_list(tips):
'''returns x_list, y_list
ets tips into neat sorted python primitives'''
x_lst = []; y_lst = []
if len(tips)==0:
return x_lst, y_lst#None # [],[]
for q in tips:
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
x_lst.append(x)
y_lst.append(y)
return x_lst, y_lst
def my_numba_list_to_python_list(numba_lst):
normal_list = []
for lst in numba_lst:
normal_list.append(list(lst))
return normal_list
@njit
def unpad_xy_position (position, pad_x, width, rejection_distance_x,
pad_y, height, rejection_distance_y):
x = unpad(X=position[0], pad=pad_x, width=width, rejection_distance=rejection_distance_x)
y = unpad(X=position[1], pad=pad_y, width=height, rejection_distance=rejection_distance_y)
return x,y
@njit
def unpad(X, pad, width, rejection_distance):
'''unpads 1 coordinate x or y for the padding:
[0... pad | pad ... width + pad | width + pad ... width + 2 * pad]
return -9999 if X is within rejection_distance of the edge,
return X if X is in [pad ... width + pad], which is if X is in the unpadded frame, which has width = width
else return X reflected onto the unpadded frame'''
P = rejection_distance
X -= pad
if X < -pad+P:
X = -9999 # throw out X later
elif X < 0:
X += width
if X > width+pad-P:
X = -9999 # throw out X later
elif X >= width:
X -= width
return X
# @njit
def textures_to_padded_textures(txt,dtexture_dt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
def matrices_to_padded_matrices(txt,dtexture_dt, pad):
'''txt and dtexture_dt are rank two tensors. i.e. the channel_no is 1.
large pad allows knots to be recorded right.
'''
# width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt, pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
# #informal test for ^that
# padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
# print(txt[0,0])
# print(padded_txt[...,2:5][pad,pad])
# @njit
def pad_matrix(mat, pad, channel_no=3):
''''''
return np.pad(array = mat, pad_width = pad, mode = 'wrap')[...,pad:pad+channel_no]
# width, height = mat.shape[:2]
# padded_width = 512 + pad #pixels
# padded_mat = np.pad(array = mat, pad_width = pad, mode = 'wrap')
# return padded_mat[...,2:5]
# @njit
def pad_texture(txt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txta = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
padded_txtb = np.pad(array = txt[...,1], pad_width = pad, mode = 'wrap')
padded_txtc = np.pad(array = txt[...,2], pad_width = pad, mode = 'wrap')
# dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return np.array([padded_txta,padded_txtb,padded_txtc]).T
def map_pbc_tips_back(tips, pad, width, height, edge_tolerance, atol = 1e-11):
'''width and height are from the shape of the unpadded buffer.
TODO: get intersection to be njit compiled, then njit map_pbc_tips_back,
for which I'll need to return to using numba.typed.List() instead of [].'''
atol_squared = atol**2
min_dist_squared_init = width**2
s_tips, x_tips, y_tips = tips
s1_mapped_lst = []; s2_mapped_lst = [];
x_mapped_lst = []; y_mapped_lst = [];
# s1_mapped_lst = List(); s2_mapped_lst = List();
# x_mapped_lst = List(); y_mapped_lst = List();
for n, x in enumerate(x_tips):
y = y_tips[n]; s = s_tips[n]
S1, S2 = s_tips[n]
y = y_tips[n]
for X, Y in zip(x, y):
X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y) | s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to onlyt get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np.nan,np.nan,np.nan]
# txt = np.stack([texture,texture,texture]).T
# print(
# get_state_nearest(x,y,txt)
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 3.5, kind='cubic')
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 0.5, kind='linear')
# )
##############################################
## Get Electrophysiological (EP) State Data #
##############################################
def get_states(x_values, y_values, txt, pad,
nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
'''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = txt
padded_txt = pad_matrix(txt, pad)
n_lst, x_lst, y_lst = tips_mapped
y_locations = np.array(flatten(x_lst))+pad#np.array(tips_mapped[2])
x_locations = np.array(flatten(y_lst))+pad#np.array(tips_mapped[3])
states_nearest = []; states_interpolated_linear = []; states_interpolated_cubic = [];
for x,y in zip(x_locations,y_locations):
state_nearest = get_state_nearest(x,y,txt=padded_txt)
state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 0.5, kind='linear')
state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 3.5, kind='cubic')
states_nearest.append(state_nearest)
states_interpolated_linear.append(state_interpolated_linear)
states_interpolated_cubic.append(state_interpolated_cubic)
return states_nearest, states_interpolated_linear, states_interpolated_cubic
def add_states(tips_mapped, states_EP):
tips_mapped = list(tips_mapped)
tips_mapped.extend(states_EP)
return tuple(tips_mapped)
def unwrap_EP(df,
EP_col_name = 'states_interpolated_linear',
drop_original_column=False):
'''If this function is slow, it may be a result of df[EP_col_name] containing strings.'''
EP_col_exists = EP_col_name in df.columns.values
if not EP_col_exists:
print(f"Caution! EP_col_name '{EP_col_exists}' does not exist. Returning input df.")
return df
else:
V_lst = []
f_lst = []
s_lst = []
for index, row in df.iterrows():
try:
V,f,s = row[EP_col_name]
except Exception as e:
V,f,s = eval(row[EP_col_name])
V_lst.append(V)
f_lst.append(f)
s_lst.append(s)
df['V'] = V_lst
df['f'] = f_lst
df['s'] = s_lst
df.drop(columns=[EP_col_name], inplace=True)
return df
@njit
def get_grad_direction(texture):
'''get the gradient direction field, N
out_Nx, out_Ny = get_grad_direction(texture)
'''
height, width = texture.shape
out_Nx = np.zeros_like(texture, dtype=np.float64)
out_Ny = np.zeros_like(texture, dtype=np.float64)
DX = 1/0.025; DY = 1/0.025;
for y in range(height):
for x in range(width):
up = _pbc(texture,y+1,x,height,width)
down = _pbc(texture,y-1,x,height,width)
left = _pbc(texture,y,x-1,height,width)
right = _pbc(texture,y,x+1,height,width)
Nx = (right-left)/DX
Ny = (up-down)/DY
norm = np.sqrt( Nx**2 + Ny**2 )
if norm == 0:
out_Nx[y,x] = -10.
out_Ny[y,x] = -10.
else:
out_Nx[y,x] = Nx/norm
out_Ny[y,x] = Ny/norm
return out_Nx, out_Ny
# ################################
# deprecated
# ################################
#deprecated - needs parameters
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.9,fully_connected='low',positive_orientation='low')
# return contours_raw,contours_inc
#tip locating for stable parameters
# img_inc = (img_nxt * ifilter(dtexture_dt[..., 0]))**2 #mask of instantaneously increasing voltages
# img_inc = filters.gaussian(img_inc,sigma=2., mode='wrap')
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# @jit
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# return contours_raw, contours_inc
# # @njit
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# x, y = intersection(x1, y1, x2, y2)
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# y = list(y)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# # tmp = intersection(x1, y1, x2, y2)
# x, y = intersection(x1, y1, x2, y2)
# # if a tip has been detected, save it and its contour ids
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# # x.sort()
# y = list(y)
# # y.sort()
# # tmp = (s,x,y)
# # tips.append(tmp)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_states(tips_mapped, txt, pad,
# nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
# '''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# # tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = pad_matrix(txt, pad)
# y_locations = np.array(tips_mapped[2]) + pad
# x_locations = np.array(tips_mapped[3]) + pad
#
# states_nearest = states_interpolated_linear = states_interpolated_cubic = [];
# for x,y in zip(x_locations,y_locations):
# state_nearest = get_state_nearest(x,y,txt=padded_txt)
# state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 0.5, kind='linear')
# state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 3.5, kind='cubic')
# states_nearest.append(state_nearest)
# states_interpolated_linear.append(state_interpolated_linear)
# states_interpolated_cubic.append(state_interpolated_cubic)
# return states_nearest, states_interpolated_linear, states_interpolated_cubic | lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2) | random_line_split |
get_tips_nonlocal.py | #use the nonlocal topological method to detect tips.
# also records topologcially preserved values.
#Tim Tyree
#9.13.2021
from skimage import measure
from numba import jit, njit
from numba.typed import List
import numpy as np, os
from . import *
# from .intersection import *
from scipy.interpolate import interp2d
from .intersection import *
# from . import find_contours
# from ._utils_find_contours import *
# from ._utils_find_tips import *
# from ._find_tips import *
@njit#(cache=True)#, nogil = True)
def get_tips(contours_a,contours_b):
'''Must recieve contours that make no attempt to jump the boundaries
returns tips with indices of parent contours returned as the nested list, n_list.
tuple(contours_a),tuple(contours_b) are each tuples of m-by-2 np.ndarrays. m is any positive int.
each member is a 1D line.
get_tips returns all intersections of
contours_a with contours_b.
will throw a TypingError exception if either input tuple is empty.
if you get a nonsingular matrix error, make sure that you`re not comparing a contour to itself.'''
n_list = List(); x_list = List(); y_list = List();
ncr = len(contours_a); nci = len(contours_b)
for n1 in range(ncr):
for n2 in range(nci):
# for n1, c1 in enumerate(contours_a):
# for n2, c2 in enumerate(contours_b):
c1 = contours_a[n1]
c2 = contours_b[n2]
x1 = c1[:, 0]
y1 = c1[:, 1]
x2 = c2[:, 0]
y2 = c2[:, 1]
x,y = intersection(x1, y1, x2, y2)
if len(x)>0:
s = (n1,n2)
xl = list(x)
yl = list(y)
n_list.append(s)
x_list.append(xl)
y_list.append(yl)
return n_list, x_list, y_list
def enumerate_tips(tips):
'''returns n_list, x_list, y_list
gets tips into neat sorted python primitives'''
n_list = []; x_lst = []; y_lst = []
if len(tips)==0:
return None # [],[],[]
for n,q in enumerate(tips):
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
n_list.append(n)
x_lst.append(x)
y_lst.append(y)
return n_list, x_lst, y_lst
def list_tips(tips):
return tips_to_list(tips)
def tips_to_list(tips):
'''returns x_list, y_list
ets tips into neat sorted python primitives'''
x_lst = []; y_lst = []
if len(tips)==0:
return x_lst, y_lst#None # [],[]
for q in tips:
if not (len(q)==0):
y, x = q
x = list(x)
x.sort()
y = list(y)
y.sort()
x_lst.append(x)
y_lst.append(y)
return x_lst, y_lst
def my_numba_list_to_python_list(numba_lst):
normal_list = []
for lst in numba_lst:
normal_list.append(list(lst))
return normal_list
@njit
def unpad_xy_position (position, pad_x, width, rejection_distance_x,
pad_y, height, rejection_distance_y):
x = unpad(X=position[0], pad=pad_x, width=width, rejection_distance=rejection_distance_x)
y = unpad(X=position[1], pad=pad_y, width=height, rejection_distance=rejection_distance_y)
return x,y
@njit
def unpad(X, pad, width, rejection_distance):
'''unpads 1 coordinate x or y for the padding:
[0... pad | pad ... width + pad | width + pad ... width + 2 * pad]
return -9999 if X is within rejection_distance of the edge,
return X if X is in [pad ... width + pad], which is if X is in the unpadded frame, which has width = width
else return X reflected onto the unpadded frame'''
P = rejection_distance
X -= pad
if X < -pad+P:
X = -9999 # throw out X later
elif X < 0:
X += width
if X > width+pad-P:
X = -9999 # throw out X later
elif X >= width:
X -= width
return X
# @njit
def textures_to_padded_textures(txt,dtexture_dt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
def matrices_to_padded_matrices(txt,dtexture_dt, pad):
'''txt and dtexture_dt are rank two tensors. i.e. the channel_no is 1.
large pad allows knots to be recorded right.
'''
# width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt, pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
# #informal test for ^that
# padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
# print(txt[0,0])
# print(padded_txt[...,2:5][pad,pad])
# @njit
def pad_matrix(mat, pad, channel_no=3):
''''''
return np.pad(array = mat, pad_width = pad, mode = 'wrap')[...,pad:pad+channel_no]
# width, height = mat.shape[:2]
# padded_width = 512 + pad #pixels
# padded_mat = np.pad(array = mat, pad_width = pad, mode = 'wrap')
# return padded_mat[...,2:5]
# @njit
def pad_texture(txt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txta = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
padded_txtb = np.pad(array = txt[...,1], pad_width = pad, mode = 'wrap')
padded_txtc = np.pad(array = txt[...,2], pad_width = pad, mode = 'wrap')
# dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return np.array([padded_txta,padded_txtb,padded_txtc]).T
def map_pbc_tips_back(tips, pad, width, height, edge_tolerance, atol = 1e-11):
'''width and height are from the shape of the unpadded buffer.
TODO: get intersection to be njit compiled, then njit map_pbc_tips_back,
for which I'll need to return to using numba.typed.List() instead of [].'''
atol_squared = atol**2
min_dist_squared_init = width**2
s_tips, x_tips, y_tips = tips
s1_mapped_lst = []; s2_mapped_lst = [];
x_mapped_lst = []; y_mapped_lst = [];
# s1_mapped_lst = List(); s2_mapped_lst = List();
# x_mapped_lst = List(); y_mapped_lst = List();
for n, x in enumerate(x_tips):
y = y_tips[n]; s = s_tips[n]
S1, S2 = s_tips[n]
y = y_tips[n]
for X, Y in zip(x, y):
X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y)
lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2)
s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to onlyt get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np.nan,np.nan,np.nan]
# txt = np.stack([texture,texture,texture]).T
# print(
# get_state_nearest(x,y,txt)
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 3.5, kind='cubic')
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 0.5, kind='linear')
# )
##############################################
## Get Electrophysiological (EP) State Data #
##############################################
def get_states(x_values, y_values, txt, pad,
nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
'''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = txt
padded_txt = pad_matrix(txt, pad)
n_lst, x_lst, y_lst = tips_mapped
y_locations = np.array(flatten(x_lst))+pad#np.array(tips_mapped[2])
x_locations = np.array(flatten(y_lst))+pad#np.array(tips_mapped[3])
states_nearest = []; states_interpolated_linear = []; states_interpolated_cubic = [];
for x,y in zip(x_locations,y_locations):
state_nearest = get_state_nearest(x,y,txt=padded_txt)
state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 0.5, kind='linear')
state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 3.5, kind='cubic')
states_nearest.append(state_nearest)
states_interpolated_linear.append(state_interpolated_linear)
states_interpolated_cubic.append(state_interpolated_cubic)
return states_nearest, states_interpolated_linear, states_interpolated_cubic
def add_states(tips_mapped, states_EP):
tips_mapped = list(tips_mapped)
tips_mapped.extend(states_EP)
return tuple(tips_mapped)
def unwrap_EP(df,
EP_col_name = 'states_interpolated_linear',
drop_original_column=False):
'''If this function is slow, it may be a result of df[EP_col_name] containing strings.'''
EP_col_exists = EP_col_name in df.columns.values
if not EP_col_exists:
print(f"Caution! EP_col_name '{EP_col_exists}' does not exist. Returning input df.")
return df
else:
V_lst = []
f_lst = []
s_lst = []
for index, row in df.iterrows():
try:
V,f,s = row[EP_col_name]
except Exception as e:
V,f,s = eval(row[EP_col_name])
V_lst.append(V)
f_lst.append(f)
s_lst.append(s)
df['V'] = V_lst
df['f'] = f_lst
df['s'] = s_lst
df.drop(columns=[EP_col_name], inplace=True)
return df
@njit
def get_grad_direction(texture):
'''get the gradient direction field, N
out_Nx, out_Ny = get_grad_direction(texture)
'''
height, width = texture.shape
out_Nx = np.zeros_like(texture, dtype=np.float64)
out_Ny = np.zeros_like(texture, dtype=np.float64)
DX = 1/0.025; DY = 1/0.025;
for y in range(height):
for x in range(width):
up = _pbc(texture,y+1,x,height,width)
down = _pbc(texture,y-1,x,height,width)
left = _pbc(texture,y,x-1,height,width)
right = _pbc(texture,y,x+1,height,width)
Nx = (right-left)/DX
Ny = (up-down)/DY
norm = np.sqrt( Nx**2 + Ny**2 )
if norm == 0:
out_Nx[y,x] = -10.
out_Ny[y,x] = -10.
else:
|
return out_Nx, out_Ny
# ################################
# deprecated
# ################################
#deprecated - needs parameters
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.9,fully_connected='low',positive_orientation='low')
# return contours_raw,contours_inc
#tip locating for stable parameters
# img_inc = (img_nxt * ifilter(dtexture_dt[..., 0]))**2 #mask of instantaneously increasing voltages
# img_inc = filters.gaussian(img_inc,sigma=2., mode='wrap')
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# @jit
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# return contours_raw, contours_inc
# # @njit
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# x, y = intersection(x1, y1, x2, y2)
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# y = list(y)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# # tmp = intersection(x1, y1, x2, y2)
# x, y = intersection(x1, y1, x2, y2)
# # if a tip has been detected, save it and its contour ids
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# # x.sort()
# y = list(y)
# # y.sort()
# # tmp = (s,x,y)
# # tips.append(tmp)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_states(tips_mapped, txt, pad,
# nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
# '''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# # tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = pad_matrix(txt, pad)
# y_locations = np.array(tips_mapped[2]) + pad
# x_locations = np.array(tips_mapped[3]) + pad
#
# states_nearest = states_interpolated_linear = states_interpolated_cubic = [];
# for x,y in zip(x_locations,y_locations):
# state_nearest = get_state_nearest(x,y,txt=padded_txt)
# state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 0.5, kind='linear')
# state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 3.5, kind='cubic')
# states_nearest.append(state_nearest)
# states_interpolated_linear.append(state_interpolated_linear)
# states_interpolated_cubic.append(state_interpolated_cubic)
# return states_nearest, states_interpolated_linear, states_interpolated_cubic
| out_Nx[y,x] = Nx/norm
out_Ny[y,x] = Ny/norm | conditional_block |
details.js | $(function(){
$(".header").load("../html/common/header.html")
$(".footer").load("../html/common/footer02.html")
$(".souS").load("../html/common/shousuolan.html")
$(".nav").load("../html/common/nav2016.html")
})
$(function(e){
// $(".jqzoomDiv ul li").each(function(){
// $(this).mouseover(function(){
// $(".zoomShow").find("img").removeAttr("src").attr("src","../img/details/detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
// $(".bigShow").find("img").removeAttr("src").attr("src","../img/details/detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
// })
// })
// $(".bx-data-lt-2").mouseover(function(){
// $(".zoomShow-pud").css({"display":"block"})
// $(".bigShow").css({"display":"block"})
// })
// $(".bx-data-lt-2").mouseout(function(){
// $(".zoomShow-pud").css({"display":"none"})
// $(".bigShow").css({"display":"none"})
// })
// $(".zoomShow").mousemove(function(e){
//// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
//// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
// var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
// var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
// var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
// if(iLeft<0) {
// iLeft = 0;
// } else if(iLeft>iWidht) {
// iLeft = iWidht;
// }
//
// if(iTop<0) {
// iTop = 0;
// } else if(iTop>iHeight) {
// iTop = iHeight;
// }
// $(".zoomShow-pud").css({"left":iLeft,"top":iTop})
// $(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
// //不想写了就用死值了
// console.log(iTop)
// })
})
$(function(){
//json加载图片与页面
var goodid
$.get("../data/details/imgs.json",function(data){
var con=data
for(var i in con){
$(".bx-bo-left-con").append('<ul><li><a href="details.html"><img src='+con[i].src+'/></a></li><li class="left-conli-txt"><a href="##" >'+con[i].userName+'</a></li><li class="left-conli-val">'+con[i].price+'</li></ul>')
}
})
// 热门商品组
// for(var i=1;i<=4;i++){
// $(".right-bom-dtu p").append("<img src='../img/details/right-bom"+i+".jpg'/>")
// }
//下方商品详情图片
$.get("../data/details/comlist.json",function(data){
if(parseInt($.cookie("comid"))%2==0){
var cl=data.comList2
}else{
var cl=data.comList1
}
$("#listName").html(cl.comname)
$("#bx-tit>P").html(cl.comname)
$(".pval-ms").find("em").html(cl.price)
$(".pval-ms").find("del").html(cl.yPrice)
$(".data-attr2").eq(0).find("span").html(cl.color)
$(".data-attr2").eq(1).find("span").html(cl.size)
for(var i=1;i<=5;i++){
$(".jqzoomDiv>ul").find("li").eq(i-1).find("img").attr("src",cl.imgSrc+"detalis-zoomdiv"+i+".JPG")
}
for(var i=1;i<=4;i++){
$(".right-bom-dtu p").append("<img src='"+cl.imgSrc+"right-bom"+i+".jpg'/>")
}
//放大镜
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong1.JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big1.JPG")
$(".jqzoomDiv ul li").each(function(){
$(this).mouseover(function(){
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
})
})
$(".bx-data-lt-2").mouseover(function(){
$(".zoomShow-pud").css({"display":"block"})
$(".bigShow").css({"display":"block"})
})
$(".bx-data-lt-2").mouseout(function(){
$(".zoomShow-pud").css({"display":"none"})
$(".bigShow").css({"display":"none"})
})
$(".zoomShow").mousemove(function(e){
// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
if(iLeft<0) {
iLeft = 0;
} else if(iLeft>iWidht) {
iLeft = iWidht;
}
if(iTop<0) {
iTop = 0;
} else if(iTop>iHeight) {
iTop = iHeight;
}
$(".zoomShow-pud").css({"left":iLeft,"top":iTop})
$(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
//不想写了就用死值了
//console.log(iTop)
})
//点击按钮事件
downBtn(cl)
})
})
function downBtn(cl){
//从cookie中 取出数据的正确方式
//console.log(JSON.parse($.cookie("carList")).ID1.iNum)
goodid=cl.id
| min=parseInt(time/3600000*60)%60
var sec=parseInt(time/3600000*24*60)%60
timer.find("em").eq(0).html(hour)
timer.find("em").eq(1).html(min)
timer.find("em").eq(2).html(sec)
// console.log(time)
},500)
})
//添加商品按钮
$(function(){
var oAdd=$(".buyAdd")
var oRed=$(".buyReduce")
var oValue=$("#buyNum")
oAdd.css("cursor","pointer")
oRed.css("cursor","pointer")
var val=oValue.val()
oAdd.mousedown(function(){
val++
oValue.val(val)
$(this).css("color","red")
})
oAdd.mouseup(function(){
$(this).css("color","#000000")
})
oRed.mousedown(function(){
val--
if(val<1){
val=1
}
oValue.val(val)
$(this).css("color","red")
})
oRed.mouseup(function(){
$(this).css("color","#000000")
})
oValue.blur(function(){
var rex=/\d/gi
//console.log(rex.test(oValue.val()))
//console.log(oValue.val())
if(!rex.test(oValue.val())){
oValue.val(val)
}
})
})
//点击加入购物车事件
$(function(){
// $(".addCart").css("cursor","pointer")
// console.log($.cookie("carList"))
// var num
// if($.cookie("carList")){
// num=JSON.parse($.cookie("carList")).num
// }else{
// num=0
// }
// $(".addCart").mousedown(function(){
//
// //console.log($("#buyNum").val())
// if(parseInt($("#buyNum").val())){
// //判断添加数量的值是否为0
// var com1={}
// com1.userName="BURBERRY 巴宝莉 新品女款单肩包4014739 2160T"
// com1.imgSrc="../img/detalis-zoomdiv-zhong1.JPG"
// com1.id=1
// com1.color="卡其"
// com1.size="//"
// num+=parseInt($("#buyNum").val())
//// if(com1){
//// num+=parseInt($("#buyNum").val())
//// }else{
//// num=parseInt($("#buyNum").val())
//// }
// com1.num=num
// $.cookie("carList",JSON.stringify(com1),{expires:7,path:"/"})
// }
// //上方导航栏nav中购物车应显示的值
// var value=0
// if($.cookie("carList")){
// value=parseInt(JSON.parse($.cookie("carList")).num)
// }else{
// value=0
// }
// $("#carNum").html("("+value+")")
// })
})
$(function(){
var oInput=$("#key")
oInput.focus(function(){
$("#tipUl").css({"display":"block"})
})
oInput.blur(function(){
$("#tipUl").css({"display":"none"})
})
oInput.on("keydown",function() {
$.ajax({
url: "https://sp0.baidu.com/5a1Fazu8AA54nxGko9WTAnF6hhy/su?wd="+oInput.val()+"&json=1&p=3&t",
dataType: "jsonp",
jsonp: "cb",
success:function(data){
var lists = data.g;
var oUl = $("#tipUl");
oUl.html("");
for(var i in lists) {
var oLi = $("<li></li>");
oLi.html(lists[i].q);
oUl.append(oLi);
}
$("#tipUl li").hover(function(){
$(this).css({color:"red",background:"#999"})
},function(){
$(this).css({color:"#000",background:"#fff"})
})
var height=$("#tipUl").height()
$("#tipUl").css({"bottom":-$("#tipUl").height()-3})
}
})
}
)
$("#tipUl").find("li").each(function(){
$(this).on("click",function(){
oInput.val($(this).text())
})
})
})
//添加热门商品的代码
$(function(){
$.get("../data/list/hot.json",function(data){
var re=data
// console.log(re[i])
for(var i in re){
// console.log(re[i])
$(".s-Hotv3-rt>ul").append(
"<li>"+
"<div class='hotv3-rtlia'>"+
"<a href='details.html'>"+
"<img src="+re[i].imgSrc+" />"+
"</a>"+
"</div>"+
"<div class='hotv3-rtlib'>"+
"<div class='hotv3-rtlib-tit'><a href='details.html' target='_blank'>"+re[i].userName+"</a></div>"+
"<div class='hotv3-rtlib-val'>"+re[i].price+"</div>"+
"<div class='hotv3-rtlib-btn'>"+
"<a href='details.html' target='_blank'>"+
"立即抢购"+
"</a>"+
"</div>"+
"</div>"+
"</li>"
)
}
})
})
| $(".addCart").css("cursor","pointer")
var num1=0;num2=0;
if($.cookie("carList1")){
num1=parseInt(JSON.parse($.cookie("carList1")).iNum)
}
if($.cookie("carList2")){
num2=parseInt(JSON.parse($.cookie("carList2")).iNum)
}
//判断是否存在cookie
// if($.cookie("carList")){
// for(var m in JSON.parse($.cookie("carList"))){
// num+=parseInt(JSON.parse($.cookie("carList"))[m].iNum)
// //这里有bug每次累加是2个加起来的和
// }
// }else{
// num=0
// }
var com1= $.cookie("carList")?JSON.parse($.cookie("carList")):{}
com1={
userName:cl.comname,
imgSrc:cl.imgSrc+"detalis-zoomdiv-zhong1.JPG",
id:cl.id,
color:cl.color,
size:cl.size,
price:cl.price,
//iNum:num
}
// var value=0
// if($.cookie("carList")){
// for(var i in JSON.parse($.cookie("carList"))){
// value +=parseInt(JSON.parse($.cookie("carList"))[i].iNum)
// }
// //value=parseInt(JSON.parse($.cookie("carList")).ID1.iNum)+parseInt(JSON.parse($.cookie("carList")).ID2.iNum)
// }
$(".addCart").mousedown(function(){
//console.log($("#buyNum").val())
if(parseInt($("#buyNum").val())){
//判断添加数量的值是否为0
if(cl.id=="1"){
num1+=parseInt($("#buyNum").val())
com1.iNum=num1
}else{
num2+=parseInt($("#buyNum").val())
com1.iNum=num2
}
//计算购物车处的值
$.cookie("carList"+cl.id,JSON.stringify(com1),{expires:7,path:"/"})
//console.log($.cookie("carList1"))
//console.log($.cookie("carList2"))
var value=0
value=num1+num2
// if($.cookie("carList")){
// value+=parseInt($("#buyNum").val())
// }else{
// value=0
// }
$("#carNum").html("("+value+")")
}
})
}
//上方导航栏nav中购物车应显示的值
//活动倒计时
$(function(){
var timer=$(".time-end span")
var time=24*3600*1000 //活动倒计时
setInterval(function(){
time-=1000
var hour=parseInt(time/3600000)%24
var | identifier_body |
details.js | $(function(){
$(".header").load("../html/common/header.html")
$(".footer").load("../html/common/footer02.html")
$(".souS").load("../html/common/shousuolan.html")
$(".nav").load("../html/common/nav2016.html")
})
$(function(e){
// $(".jqzoomDiv ul li").each(function(){
// $(this).mouseover(function(){
// $(".zoomShow").find("img").removeAttr("src").attr("src","../img/details/detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
// $(".bigShow").find("img").removeAttr("src").attr("src","../img/details/detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
// })
// })
// $(".bx-data-lt-2").mouseover(function(){
// $(".zoomShow-pud").css({"display":"block"})
// $(".bigShow").css({"display":"block"})
// })
// $(".bx-data-lt-2").mouseout(function(){
// $(".zoomShow-pud").css({"display":"none"})
// $(".bigShow").css({"display":"none"})
// })
// $(".zoomShow").mousemove(function(e){
//// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
//// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
// var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
// var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
// var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
// if(iLeft<0) {
// iLeft = 0;
// } else if(iLeft>iWidht) {
// iLeft = iWidht;
// }
//
// if(iTop<0) {
// iTop = 0;
// } else if(iTop>iHeight) {
// iTop = iHeight;
// }
// $(".zoomShow-pud").css({"left":iLeft,"top":iTop})
// $(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
// //不想写了就用死值了
// console.log(iTop)
// })
})
$(function(){
//json加载图片与页面
var goodid
$.get("../data/details/imgs.json",function(data){
var con=data
for(var i in con){
$(".bx-bo-left-con").append('<ul><li><a href="details.html"><img src='+con[i].src+'/></a></li><li class="left-conli-txt"><a href="##" >'+con[i].userName+'</a></li><li class="left-conli-val">'+con[i].price+'</li></ul>')
}
})
// 热门商品组
// for(var i=1;i<=4;i++){
// $(".right-bom-dtu p").append("<img src='../img/details/right-bom"+i+".jpg'/>")
// }
//下方商品详情图片
$.get("../data/details/comlist.json",function(data){
if(parseInt($.cookie("comid"))%2==0){
var cl=data.comList2
}else{
var cl=data.comList1
}
$("#listName").html(cl.comname)
$("#bx-tit>P").html(cl.comname)
$(".pval-ms").find("em").html(cl.price)
$(".pval-ms").find("del").html(cl.yPrice)
$(".data-attr2").eq(0).find("span").html(cl.color)
$(".data-attr2").eq(1).find("span").html(cl.size)
for(var i=1;i<=5;i++){
$(".jqzoomDiv>ul").find("li").eq(i-1).find("img").attr("src",cl.imgSrc+"detalis-zoomdiv"+i+".JPG")
}
for(var i=1;i<=4;i++){
$(".right-bom-dtu p").append("<img src='"+cl.imgSrc+"right-bom"+i+".jpg'/>")
}
//放大镜
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong1.JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big1.JPG")
$(".jqzoomDiv ul li").each(function(){
$(this).mouseover(function(){
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
})
})
$(".bx-data-lt-2").mouseover(function(){
$(".zoomShow-pud").css({"display":"block"})
$(".bigShow").css({"display":"block"})
})
$(".bx-data-lt-2").mouseout(function(){
$(".zoomShow-pud").css({"display":"none"})
$(".bigShow").css({"display":"none"})
})
$(".zoomShow").mousemove(function(e){
// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
if(iLeft<0) {
iLeft = 0;
} else if(iLeft>iWidht) {
iLeft = iWidht;
}
if(iTop<0) {
iTop = 0;
} else if(iTop>iHeight) {
iTop = iHeight;
}
$(".zoomShow-pud").css({"left":iLeft,"top":iTop})
$(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
//不想写了就用死值了
//console.log(iTop)
})
//点击按钮事件
downBtn(cl)
})
})
function downBtn(cl){
//从cookie中 取出数据的正确方式
//console.log(JSON.parse($.cookie("carList")).ID1.iNum)
go | .id
$(".addCart").css("cursor","pointer")
var num1=0;num2=0;
if($.cookie("carList1")){
num1=parseInt(JSON.parse($.cookie("carList1")).iNum)
}
if($.cookie("carList2")){
num2=parseInt(JSON.parse($.cookie("carList2")).iNum)
}
//判断是否存在cookie
// if($.cookie("carList")){
// for(var m in JSON.parse($.cookie("carList"))){
// num+=parseInt(JSON.parse($.cookie("carList"))[m].iNum)
// //这里有bug每次累加是2个加起来的和
// }
// }else{
// num=0
// }
var com1= $.cookie("carList")?JSON.parse($.cookie("carList")):{}
com1={
userName:cl.comname,
imgSrc:cl.imgSrc+"detalis-zoomdiv-zhong1.JPG",
id:cl.id,
color:cl.color,
size:cl.size,
price:cl.price,
//iNum:num
}
// var value=0
// if($.cookie("carList")){
// for(var i in JSON.parse($.cookie("carList"))){
// value +=parseInt(JSON.parse($.cookie("carList"))[i].iNum)
// }
// //value=parseInt(JSON.parse($.cookie("carList")).ID1.iNum)+parseInt(JSON.parse($.cookie("carList")).ID2.iNum)
// }
$(".addCart").mousedown(function(){
//console.log($("#buyNum").val())
if(parseInt($("#buyNum").val())){
//判断添加数量的值是否为0
if(cl.id=="1"){
num1+=parseInt($("#buyNum").val())
com1.iNum=num1
}else{
num2+=parseInt($("#buyNum").val())
com1.iNum=num2
}
//计算购物车处的值
$.cookie("carList"+cl.id,JSON.stringify(com1),{expires:7,path:"/"})
//console.log($.cookie("carList1"))
//console.log($.cookie("carList2"))
var value=0
value=num1+num2
// if($.cookie("carList")){
// value+=parseInt($("#buyNum").val())
// }else{
// value=0
// }
$("#carNum").html("("+value+")")
}
})
}
//上方导航栏nav中购物车应显示的值
//活动倒计时
$(function(){
var timer=$(".time-end span")
var time=24*3600*1000 //活动倒计时
setInterval(function(){
time-=1000
var hour=parseInt(time/3600000)%24
var min=parseInt(time/3600000*60)%60
var sec=parseInt(time/3600000*24*60)%60
timer.find("em").eq(0).html(hour)
timer.find("em").eq(1).html(min)
timer.find("em").eq(2).html(sec)
// console.log(time)
},500)
})
//添加商品按钮
$(function(){
var oAdd=$(".buyAdd")
var oRed=$(".buyReduce")
var oValue=$("#buyNum")
oAdd.css("cursor","pointer")
oRed.css("cursor","pointer")
var val=oValue.val()
oAdd.mousedown(function(){
val++
oValue.val(val)
$(this).css("color","red")
})
oAdd.mouseup(function(){
$(this).css("color","#000000")
})
oRed.mousedown(function(){
val--
if(val<1){
val=1
}
oValue.val(val)
$(this).css("color","red")
})
oRed.mouseup(function(){
$(this).css("color","#000000")
})
oValue.blur(function(){
var rex=/\d/gi
//console.log(rex.test(oValue.val()))
//console.log(oValue.val())
if(!rex.test(oValue.val())){
oValue.val(val)
}
})
})
//点击加入购物车事件
$(function(){
// $(".addCart").css("cursor","pointer")
// console.log($.cookie("carList"))
// var num
// if($.cookie("carList")){
// num=JSON.parse($.cookie("carList")).num
// }else{
// num=0
// }
// $(".addCart").mousedown(function(){
//
// //console.log($("#buyNum").val())
// if(parseInt($("#buyNum").val())){
// //判断添加数量的值是否为0
// var com1={}
// com1.userName="BURBERRY 巴宝莉 新品女款单肩包4014739 2160T"
// com1.imgSrc="../img/detalis-zoomdiv-zhong1.JPG"
// com1.id=1
// com1.color="卡其"
// com1.size="//"
// num+=parseInt($("#buyNum").val())
//// if(com1){
//// num+=parseInt($("#buyNum").val())
//// }else{
//// num=parseInt($("#buyNum").val())
//// }
// com1.num=num
// $.cookie("carList",JSON.stringify(com1),{expires:7,path:"/"})
// }
// //上方导航栏nav中购物车应显示的值
// var value=0
// if($.cookie("carList")){
// value=parseInt(JSON.parse($.cookie("carList")).num)
// }else{
// value=0
// }
// $("#carNum").html("("+value+")")
// })
})
$(function(){
var oInput=$("#key")
oInput.focus(function(){
$("#tipUl").css({"display":"block"})
})
oInput.blur(function(){
$("#tipUl").css({"display":"none"})
})
oInput.on("keydown",function() {
$.ajax({
url: "https://sp0.baidu.com/5a1Fazu8AA54nxGko9WTAnF6hhy/su?wd="+oInput.val()+"&json=1&p=3&t",
dataType: "jsonp",
jsonp: "cb",
success:function(data){
var lists = data.g;
var oUl = $("#tipUl");
oUl.html("");
for(var i in lists) {
var oLi = $("<li></li>");
oLi.html(lists[i].q);
oUl.append(oLi);
}
$("#tipUl li").hover(function(){
$(this).css({color:"red",background:"#999"})
},function(){
$(this).css({color:"#000",background:"#fff"})
})
var height=$("#tipUl").height()
$("#tipUl").css({"bottom":-$("#tipUl").height()-3})
}
})
}
)
$("#tipUl").find("li").each(function(){
$(this).on("click",function(){
oInput.val($(this).text())
})
})
})
//添加热门商品的代码
$(function(){
$.get("../data/list/hot.json",function(data){
var re=data
// console.log(re[i])
for(var i in re){
// console.log(re[i])
$(".s-Hotv3-rt>ul").append(
"<li>"+
"<div class='hotv3-rtlia'>"+
"<a href='details.html'>"+
"<img src="+re[i].imgSrc+" />"+
"</a>"+
"</div>"+
"<div class='hotv3-rtlib'>"+
"<div class='hotv3-rtlib-tit'><a href='details.html' target='_blank'>"+re[i].userName+"</a></div>"+
"<div class='hotv3-rtlib-val'>"+re[i].price+"</div>"+
"<div class='hotv3-rtlib-btn'>"+
"<a href='details.html' target='_blank'>"+
"立即抢购"+
"</a>"+
"</div>"+
"</div>"+
"</li>"
)
}
})
})
| odid=cl | identifier_name |
details.js | $(function(){
$(".header").load("../html/common/header.html")
$(".footer").load("../html/common/footer02.html")
$(".souS").load("../html/common/shousuolan.html")
$(".nav").load("../html/common/nav2016.html")
})
$(function(e){
// $(".jqzoomDiv ul li").each(function(){
// $(this).mouseover(function(){
// $(".zoomShow").find("img").removeAttr("src").attr("src","../img/details/detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
// $(".bigShow").find("img").removeAttr("src").attr("src","../img/details/detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
// })
// })
// $(".bx-data-lt-2").mouseover(function(){
// $(".zoomShow-pud").css({"display":"block"})
// $(".bigShow").css({"display":"block"})
// })
// $(".bx-data-lt-2").mouseout(function(){
// $(".zoomShow-pud").css({"display":"none"})
// $(".bigShow").css({"display":"none"})
// })
// $(".zoomShow").mousemove(function(e){
//// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
//// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
// var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
// var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
// var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
// if(iLeft<0) {
// iLeft = 0;
// } else if(iLeft>iWidht) {
// iLeft = iWidht;
// }
//
// if(iTop<0) {
// iTop = 0;
// } else if(iTop>iHeight) {
// iTop = iHeight;
// }
// $(".zoomShow-pud").css({"left":iLeft,"top":iTop})
// $(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
// //不想写了就用死值了
// console.log(iTop)
// })
})
$(function(){
//json加载图片与页面
var goodid
$.get("../data/details/imgs.json",function(data){
var con=data
for(var i in con){
$(".bx-bo-left-con").append('<ul><li><a href="details.html"><img src='+con[i].src+'/></a></li><li class="left-conli-txt"><a href="##" >'+con[i].userName+'</a></li><li class="left-conli-val">'+con[i].price+'</li></ul>')
}
})
// 热门商品组
// for(var i=1;i<=4;i++){
// $(".right-bom-dtu p").append("<img src='../img/details/right-bom"+i+".jpg'/>")
// }
//下方商品详情图片
$.get("../data/details/comlist.json",function(data){
if(parseInt($.cookie("comid"))%2==0){
var cl=data.comList2
}else{
var cl=data.comList1
}
$("#listName").html(cl.comname)
$("#bx-tit>P").html(cl.comname)
$(".pval-ms").find("em").html(cl.price)
$(".pval-ms").find("del").html(cl.yPrice)
$(".data-attr2").eq(0).find("span").html(cl.color)
$(".data-attr2").eq(1).find("span").html(cl.size)
for(var i=1;i<=5;i++){
$(".jqzoomDiv>ul").find("li").eq(i-1).find("img").attr("src",cl.imgSrc+"detalis-zoomdiv"+i+".JPG")
}
for(var i=1;i<=4;i++){
$(".right-bom-dtu p").append("<img src='"+cl.imgSrc+"right-bom"+i+".jpg'/>")
}
//放大镜
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong1.JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big1.JPG")
$(".jqzoomDiv ul li").each(function(){
$(this).mouseover(function(){
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
})
})
$(".bx-data-lt-2").mouseover(function(){
$(".zoomShow-pud").css({"display":"block"})
$(".bigShow").css({"display":"block"})
})
$(".bx-data-lt-2").mouseout(function(){
$(".zoomShow-pud").css({"display":"none"})
$(".bigShow").css({"display":"none"})
})
$(".zoomShow").mousemove(function(e){
// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
if(iLeft<0) {
iLeft = 0;
} else if(iLeft>iWidht) {
iLeft = iWidht;
}
if(iTop<0) {
iTop = 0;
} else if(iTop>iHeight) {
iTop = iHeight;
}
$(".zoomShow-pud").css({"left":iLeft,"top":iTop})
$(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
//不想写了就用死值了
//console.log(iTop)
})
//点击按钮事件
downBtn(cl)
})
})
function downBtn(cl){
//从cookie中 取出数据的正确方式
//console.log(JSON.parse($.cookie("carList")).ID1.iNum)
goodid=cl.id | num1=parseInt(JSON.parse($.cookie("carList1")).iNum)
}
if($.cookie("carList2")){
num2=parseInt(JSON.parse($.cookie("carList2")).iNum)
}
//判断是否存在cookie
// if($.cookie("carList")){
// for(var m in JSON.parse($.cookie("carList"))){
// num+=parseInt(JSON.parse($.cookie("carList"))[m].iNum)
// //这里有bug每次累加是2个加起来的和
// }
// }else{
// num=0
// }
var com1= $.cookie("carList")?JSON.parse($.cookie("carList")):{}
com1={
userName:cl.comname,
imgSrc:cl.imgSrc+"detalis-zoomdiv-zhong1.JPG",
id:cl.id,
color:cl.color,
size:cl.size,
price:cl.price,
//iNum:num
}
// var value=0
// if($.cookie("carList")){
// for(var i in JSON.parse($.cookie("carList"))){
// value +=parseInt(JSON.parse($.cookie("carList"))[i].iNum)
// }
// //value=parseInt(JSON.parse($.cookie("carList")).ID1.iNum)+parseInt(JSON.parse($.cookie("carList")).ID2.iNum)
// }
$(".addCart").mousedown(function(){
//console.log($("#buyNum").val())
if(parseInt($("#buyNum").val())){
//判断添加数量的值是否为0
if(cl.id=="1"){
num1+=parseInt($("#buyNum").val())
com1.iNum=num1
}else{
num2+=parseInt($("#buyNum").val())
com1.iNum=num2
}
//计算购物车处的值
$.cookie("carList"+cl.id,JSON.stringify(com1),{expires:7,path:"/"})
//console.log($.cookie("carList1"))
//console.log($.cookie("carList2"))
var value=0
value=num1+num2
// if($.cookie("carList")){
// value+=parseInt($("#buyNum").val())
// }else{
// value=0
// }
$("#carNum").html("("+value+")")
}
})
}
//上方导航栏nav中购物车应显示的值
//活动倒计时
$(function(){
var timer=$(".time-end span")
var time=24*3600*1000 //活动倒计时
setInterval(function(){
time-=1000
var hour=parseInt(time/3600000)%24
var min=parseInt(time/3600000*60)%60
var sec=parseInt(time/3600000*24*60)%60
timer.find("em").eq(0).html(hour)
timer.find("em").eq(1).html(min)
timer.find("em").eq(2).html(sec)
// console.log(time)
},500)
})
//添加商品按钮
$(function(){
var oAdd=$(".buyAdd")
var oRed=$(".buyReduce")
var oValue=$("#buyNum")
oAdd.css("cursor","pointer")
oRed.css("cursor","pointer")
var val=oValue.val()
oAdd.mousedown(function(){
val++
oValue.val(val)
$(this).css("color","red")
})
oAdd.mouseup(function(){
$(this).css("color","#000000")
})
oRed.mousedown(function(){
val--
if(val<1){
val=1
}
oValue.val(val)
$(this).css("color","red")
})
oRed.mouseup(function(){
$(this).css("color","#000000")
})
oValue.blur(function(){
var rex=/\d/gi
//console.log(rex.test(oValue.val()))
//console.log(oValue.val())
if(!rex.test(oValue.val())){
oValue.val(val)
}
})
})
//点击加入购物车事件
$(function(){
// $(".addCart").css("cursor","pointer")
// console.log($.cookie("carList"))
// var num
// if($.cookie("carList")){
// num=JSON.parse($.cookie("carList")).num
// }else{
// num=0
// }
// $(".addCart").mousedown(function(){
//
// //console.log($("#buyNum").val())
// if(parseInt($("#buyNum").val())){
// //判断添加数量的值是否为0
// var com1={}
// com1.userName="BURBERRY 巴宝莉 新品女款单肩包4014739 2160T"
// com1.imgSrc="../img/detalis-zoomdiv-zhong1.JPG"
// com1.id=1
// com1.color="卡其"
// com1.size="//"
// num+=parseInt($("#buyNum").val())
//// if(com1){
//// num+=parseInt($("#buyNum").val())
//// }else{
//// num=parseInt($("#buyNum").val())
//// }
// com1.num=num
// $.cookie("carList",JSON.stringify(com1),{expires:7,path:"/"})
// }
// //上方导航栏nav中购物车应显示的值
// var value=0
// if($.cookie("carList")){
// value=parseInt(JSON.parse($.cookie("carList")).num)
// }else{
// value=0
// }
// $("#carNum").html("("+value+")")
// })
})
$(function(){
var oInput=$("#key")
oInput.focus(function(){
$("#tipUl").css({"display":"block"})
})
oInput.blur(function(){
$("#tipUl").css({"display":"none"})
})
oInput.on("keydown",function() {
$.ajax({
url: "https://sp0.baidu.com/5a1Fazu8AA54nxGko9WTAnF6hhy/su?wd="+oInput.val()+"&json=1&p=3&t",
dataType: "jsonp",
jsonp: "cb",
success:function(data){
var lists = data.g;
var oUl = $("#tipUl");
oUl.html("");
for(var i in lists) {
var oLi = $("<li></li>");
oLi.html(lists[i].q);
oUl.append(oLi);
}
$("#tipUl li").hover(function(){
$(this).css({color:"red",background:"#999"})
},function(){
$(this).css({color:"#000",background:"#fff"})
})
var height=$("#tipUl").height()
$("#tipUl").css({"bottom":-$("#tipUl").height()-3})
}
})
}
)
$("#tipUl").find("li").each(function(){
$(this).on("click",function(){
oInput.val($(this).text())
})
})
})
//添加热门商品的代码
$(function(){
$.get("../data/list/hot.json",function(data){
var re=data
// console.log(re[i])
for(var i in re){
// console.log(re[i])
$(".s-Hotv3-rt>ul").append(
"<li>"+
"<div class='hotv3-rtlia'>"+
"<a href='details.html'>"+
"<img src="+re[i].imgSrc+" />"+
"</a>"+
"</div>"+
"<div class='hotv3-rtlib'>"+
"<div class='hotv3-rtlib-tit'><a href='details.html' target='_blank'>"+re[i].userName+"</a></div>"+
"<div class='hotv3-rtlib-val'>"+re[i].price+"</div>"+
"<div class='hotv3-rtlib-btn'>"+
"<a href='details.html' target='_blank'>"+
"立即抢购"+
"</a>"+
"</div>"+
"</div>"+
"</li>"
)
}
})
}) | $(".addCart").css("cursor","pointer")
var num1=0;num2=0;
if($.cookie("carList1")){ | random_line_split |
reduc_spec.py | import in_out
import numpy as np
from copy import deepcopy as dc
from IPython.core.debugger import Tracer; debug_here=Tracer()
from matplotlib.pyplot import *
# Useful trig functions
def asind(x):
return np.arcsin(x)*180/np.pi
def sind(x):
return np.sin(x*np.pi/180)
def cosd(x):
return np.cos(x*np.pi/180)
def atand2(x,y):
return np.arctan2(x,y)*180/np.pi
#planck fxn lives here now
def planck(f, T):
h = 6.62606957e-34
c = 2.99792458e8
k = 1.3806488e-23
x = 8 * h * np.pi / c**3
y = f**3
ex = np.exp(h * f / (k * T)) - 1
return x * y / ex
def arr(x):
if type(x) is int:
x=np.array([x])
return x
def azel2radec(az,el,mjd,lat=47.8781,lon=-87.6298):
"""Convert az/del to ra/dec, Chicago lat/lon by default. Input/output in
degrees."""
T_UT1 = (mjd-51544.5)/36525;
ThetaGMST = 67310.54841 + (876600*3600 + 8640184.812866)*T_UT1 + \
.093104*(T_UT1**2) - (6.2e-6)*(T_UT1**3)
ThetaGMST = np.mod((np.mod(ThetaGMST,86400*(ThetaGMST/np.abs(ThetaGMST)))/240),360)
ThetaLST = ThetaGMST + lon
DEC = asind(sind(el)*sind(lat)+cosd(el)*cosd(lat)*cosd(az))
LHA = atand2(-sind(az)*cosd(el)/cosd(DEC),
(sind(el)-sind(DEC)*sind(lat))/(cosd(DEC)*cosd(lat)))*(180/np.pi);
RA = np.mod(ThetaLST-LHA,360);
return RA,DEC
class data:
def __init__(self, *args, **kwargs):
'''
Read in data, t0 and t1 as tuples, e.g. (2016,5,3,0,0,0)
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if d.has_key('LO_freq'):
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
"""Zenith angle in degrees to airmass"""
return 1/cosd(x)
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
def | (self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError, 'Can\'t deal with non-uniform cal data yet.'
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk!=None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare
ind=np.where(ce<=ss)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
# Right now just make out DC and clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x)
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in enumerate(np.unique(self.za[ind])):
# Find contiguous blocks
doind=ind[np.where(self.za[ind]==val)]
dx=doind-np.roll(doind,1)
startind=np.where(dx!=1)[0]
endind=np.append(startind[1:],dx.size)
for l,val in enumerate(startind):
# For each contiguous block
s=doind[startind[l]]
e=doind[endind[l]-1]+1
x=self.t[s:e]
x=x-x.mean()
y=self.spec[s:e,:]
for m in range(self.nf):
# For each channel
yy=y[:,m];
if not np.any(np.isnan(yy)):
p=np.polyfit(x,yy,deg=deg); # Fit p3
z=np.poly1d(p)
yy=yy-z(x)
self.spec[s:e,m]=yy # replace data
def fitam(self,zarange=[20,50]):
"""Fit a line to P(am)"""
# Loop over scan blocks and fit each
m=np.zeros([self.nscan,self.nf]) # slope of P(agm) fit
b=np.zeros([self.nscan,self.nf]) # intercept of P(am) fit
q=np.zeros([self.nscan,self.nf]) # quadratic term
g=np.zeros([self.nscan,self.nf]) # gain
c=np.zeros([self.nscan,self.nf]) # mean of cal stare
Trx=np.zeros([self.nscan,self.nf]) # noise temperature
Th=290 # hot load
Tz=2 # Atmospheric zenith temp (not including CMB)
Tiso=2.7 # T above atmosphere
for k in range(self.nscan):
# Pull out scanning data for this block
ind=self.getscanind(k)
za=self.za[ind]
am=self.am[ind]
s=self.spec[ind,:]
# Find where elevation is in range
fitind=np.where((za>=zarange[0]) & (za<=zarange[1]))[0]
x=am[fitind]
y=s[fitind,:]
# Fit P(am) for each frequency
#for j in range(self.nf):
# yy=y[:,j]
# if not np.any(np.isnan(yy)):
#p=np.polyfit(x,yy,deg=1);
#m[k,j]=p[0]
#b[k,j]=p[1]
# Try to get gain
# Mean of lead/trail cal stare
c[k,:] = self.calccalmean(k)
cold=y.mean(0)
Tc = am.mean()*Tz + Tiso
for j in range(self.nf):
p=np.polyfit([Tc,Th],[cold[j],c[k,j]],deg=1)
g[k,j]=dc(p[0])
b[k,j]=dc(p[1])
# Noise temperature
Trx0=np.linspace(0,500,1000);
rhs = (Trx0+Th)/(Trx0+Tc)
for j in range(self.nf):
Ph=c[k,j] # hot load
Pc=b[k,j] # cold load
lhs = Ph/Pc
Trx[k,j]=np.interp(0,lhs-rhs,Trx0)
#g[k,:]=(c[k,:]-b[k,:])/(Th-Tc)
self.c = c
self.m = m
self.b = b
self.g = g
self.Trx=Trx
def expandcal(self):
"""Return an index array to expand an nscan x nf array of cal data to an array
of size self.spec (nt x nf).
ex. ind=expandcal(); ratio=self.scan/self.c[ind]
"""
ind=np.zeros(self.spec.shape[0]).astype(int)
for k in range(self.nscan):
ind[self.getscanind(k)]=k
ind[self.getcalind(k)]=k
return ind
def svd(self):
"""SVD filter"""
self.fspec=np.zeros(self.spec.shape)
self.u=[]
self.s=[]
self.v=[]
for k in range(self.nscan):
sind=self.getscanind(k)
x=self.spec[sind]
x[np.isnan(x)]=0
u,s,v=np.linalg.svd(x,full_matrices=True)
ss=dc(s)
ss[0]=0
SS=np.zeros(x.shape)
sz=x.shape[0]
SS[:sz,:sz]=np.diag(ss);
z=np.dot(u,np.dot(SS,v))
#for k in range(sz):
# y=z[k];ind=np.arange(500,1000);p=np.polyfit(self.f[ind],y[ind],deg=3);
# z[k]=y-np.poly1d(p)(self.f);
self.fspec[sind]=z
self.u.append(u)
self.s.append(s)
self.v.append(v)
return u,s,v,self.fspec
def atmgaincal(self,flim=[10000,10500]):
"""Gain cal on atmosphere"""
sind=self.getscanind()
ind=self.expandcal()
amm=np.tile(self.am,(self.nf,1)).T
x=(self.spec-self.b[ind])/self.g[ind];x=x[sind]
x=x/amm[sind]
y=x[ np.where((self.za[sind]>=25) & (self.za[sind]<=35))[0] ]
#for k in range(y.shape[0]):
# y[k]=y[k]-np.nanmedian(y[k])
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
for k in range(y.shape[0]):
x=self.f[find];
yy=y[k,find];
p=np.polyfit(x,yy,deg=1);
y[k]=y[k]-np.poly1d(p)(self.f)
return y
def wmean(self,y,flim=[10000,10500]):
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
w=np.tile(1/np.nanstd(y[:,find],axis=1)**2,(2048,1)).T;
ym=np.sum(y*w,axis=0)/np.sum(w,axis=0)
return ym
| P2T | identifier_name |
reduc_spec.py | import in_out
import numpy as np
from copy import deepcopy as dc
from IPython.core.debugger import Tracer; debug_here=Tracer()
from matplotlib.pyplot import *
# Useful trig functions
def asind(x):
return np.arcsin(x)*180/np.pi
def sind(x):
return np.sin(x*np.pi/180)
def cosd(x):
return np.cos(x*np.pi/180)
def atand2(x,y):
return np.arctan2(x,y)*180/np.pi
#planck fxn lives here now
def planck(f, T):
h = 6.62606957e-34
c = 2.99792458e8
k = 1.3806488e-23
x = 8 * h * np.pi / c**3
y = f**3
ex = np.exp(h * f / (k * T)) - 1
return x * y / ex
def arr(x):
if type(x) is int:
x=np.array([x])
return x
def azel2radec(az,el,mjd,lat=47.8781,lon=-87.6298):
"""Convert az/del to ra/dec, Chicago lat/lon by default. Input/output in
degrees."""
T_UT1 = (mjd-51544.5)/36525;
ThetaGMST = 67310.54841 + (876600*3600 + 8640184.812866)*T_UT1 + \
.093104*(T_UT1**2) - (6.2e-6)*(T_UT1**3)
ThetaGMST = np.mod((np.mod(ThetaGMST,86400*(ThetaGMST/np.abs(ThetaGMST)))/240),360)
ThetaLST = ThetaGMST + lon
DEC = asind(sind(el)*sind(lat)+cosd(el)*cosd(lat)*cosd(az))
LHA = atand2(-sind(az)*cosd(el)/cosd(DEC),
(sind(el)-sind(DEC)*sind(lat))/(cosd(DEC)*cosd(lat)))*(180/np.pi);
RA = np.mod(ThetaLST-LHA,360);
return RA,DEC
class data:
def __init__(self, *args, **kwargs):
'''
Read in data, t0 and t1 as tuples, e.g. (2016,5,3,0,0,0)
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if d.has_key('LO_freq'):
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
"""Zenith angle in degrees to airmass"""
return 1/cosd(x)
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
def P2T(self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError, 'Can\'t deal with non-uniform cal data yet.'
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk!=None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare | if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
# Right now just make out DC and clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x)
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in enumerate(np.unique(self.za[ind])):
# Find contiguous blocks
doind=ind[np.where(self.za[ind]==val)]
dx=doind-np.roll(doind,1)
startind=np.where(dx!=1)[0]
endind=np.append(startind[1:],dx.size)
for l,val in enumerate(startind):
# For each contiguous block
s=doind[startind[l]]
e=doind[endind[l]-1]+1
x=self.t[s:e]
x=x-x.mean()
y=self.spec[s:e,:]
for m in range(self.nf):
# For each channel
yy=y[:,m];
if not np.any(np.isnan(yy)):
p=np.polyfit(x,yy,deg=deg); # Fit p3
z=np.poly1d(p)
yy=yy-z(x)
self.spec[s:e,m]=yy # replace data
def fitam(self,zarange=[20,50]):
"""Fit a line to P(am)"""
# Loop over scan blocks and fit each
m=np.zeros([self.nscan,self.nf]) # slope of P(agm) fit
b=np.zeros([self.nscan,self.nf]) # intercept of P(am) fit
q=np.zeros([self.nscan,self.nf]) # quadratic term
g=np.zeros([self.nscan,self.nf]) # gain
c=np.zeros([self.nscan,self.nf]) # mean of cal stare
Trx=np.zeros([self.nscan,self.nf]) # noise temperature
Th=290 # hot load
Tz=2 # Atmospheric zenith temp (not including CMB)
Tiso=2.7 # T above atmosphere
for k in range(self.nscan):
# Pull out scanning data for this block
ind=self.getscanind(k)
za=self.za[ind]
am=self.am[ind]
s=self.spec[ind,:]
# Find where elevation is in range
fitind=np.where((za>=zarange[0]) & (za<=zarange[1]))[0]
x=am[fitind]
y=s[fitind,:]
# Fit P(am) for each frequency
#for j in range(self.nf):
# yy=y[:,j]
# if not np.any(np.isnan(yy)):
#p=np.polyfit(x,yy,deg=1);
#m[k,j]=p[0]
#b[k,j]=p[1]
# Try to get gain
# Mean of lead/trail cal stare
c[k,:] = self.calccalmean(k)
cold=y.mean(0)
Tc = am.mean()*Tz + Tiso
for j in range(self.nf):
p=np.polyfit([Tc,Th],[cold[j],c[k,j]],deg=1)
g[k,j]=dc(p[0])
b[k,j]=dc(p[1])
# Noise temperature
Trx0=np.linspace(0,500,1000);
rhs = (Trx0+Th)/(Trx0+Tc)
for j in range(self.nf):
Ph=c[k,j] # hot load
Pc=b[k,j] # cold load
lhs = Ph/Pc
Trx[k,j]=np.interp(0,lhs-rhs,Trx0)
#g[k,:]=(c[k,:]-b[k,:])/(Th-Tc)
self.c = c
self.m = m
self.b = b
self.g = g
self.Trx=Trx
def expandcal(self):
"""Return an index array to expand an nscan x nf array of cal data to an array
of size self.spec (nt x nf).
ex. ind=expandcal(); ratio=self.scan/self.c[ind]
"""
ind=np.zeros(self.spec.shape[0]).astype(int)
for k in range(self.nscan):
ind[self.getscanind(k)]=k
ind[self.getcalind(k)]=k
return ind
def svd(self):
"""SVD filter"""
self.fspec=np.zeros(self.spec.shape)
self.u=[]
self.s=[]
self.v=[]
for k in range(self.nscan):
sind=self.getscanind(k)
x=self.spec[sind]
x[np.isnan(x)]=0
u,s,v=np.linalg.svd(x,full_matrices=True)
ss=dc(s)
ss[0]=0
SS=np.zeros(x.shape)
sz=x.shape[0]
SS[:sz,:sz]=np.diag(ss);
z=np.dot(u,np.dot(SS,v))
#for k in range(sz):
# y=z[k];ind=np.arange(500,1000);p=np.polyfit(self.f[ind],y[ind],deg=3);
# z[k]=y-np.poly1d(p)(self.f);
self.fspec[sind]=z
self.u.append(u)
self.s.append(s)
self.v.append(v)
return u,s,v,self.fspec
def atmgaincal(self,flim=[10000,10500]):
"""Gain cal on atmosphere"""
sind=self.getscanind()
ind=self.expandcal()
amm=np.tile(self.am,(self.nf,1)).T
x=(self.spec-self.b[ind])/self.g[ind];x=x[sind]
x=x/amm[sind]
y=x[ np.where((self.za[sind]>=25) & (self.za[sind]<=35))[0] ]
#for k in range(y.shape[0]):
# y[k]=y[k]-np.nanmedian(y[k])
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
for k in range(y.shape[0]):
x=self.f[find];
yy=y[k,find];
p=np.polyfit(x,yy,deg=1);
y[k]=y[k]-np.poly1d(p)(self.f)
return y
def wmean(self,y,flim=[10000,10500]):
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
w=np.tile(1/np.nanstd(y[:,find],axis=1)**2,(2048,1)).T;
ym=np.sum(y*w,axis=0)/np.sum(w,axis=0)
return ym | ind=np.where(ce<=ss)[0] | random_line_split |
reduc_spec.py | import in_out
import numpy as np
from copy import deepcopy as dc
from IPython.core.debugger import Tracer; debug_here=Tracer()
from matplotlib.pyplot import *
# Useful trig functions
def asind(x):
return np.arcsin(x)*180/np.pi
def sind(x):
return np.sin(x*np.pi/180)
def cosd(x):
return np.cos(x*np.pi/180)
def atand2(x,y):
return np.arctan2(x,y)*180/np.pi
#planck fxn lives here now
def planck(f, T):
h = 6.62606957e-34
c = 2.99792458e8
k = 1.3806488e-23
x = 8 * h * np.pi / c**3
y = f**3
ex = np.exp(h * f / (k * T)) - 1
return x * y / ex
def arr(x):
if type(x) is int:
x=np.array([x])
return x
def azel2radec(az,el,mjd,lat=47.8781,lon=-87.6298):
"""Convert az/del to ra/dec, Chicago lat/lon by default. Input/output in
degrees."""
T_UT1 = (mjd-51544.5)/36525;
ThetaGMST = 67310.54841 + (876600*3600 + 8640184.812866)*T_UT1 + \
.093104*(T_UT1**2) - (6.2e-6)*(T_UT1**3)
ThetaGMST = np.mod((np.mod(ThetaGMST,86400*(ThetaGMST/np.abs(ThetaGMST)))/240),360)
ThetaLST = ThetaGMST + lon
DEC = asind(sind(el)*sind(lat)+cosd(el)*cosd(lat)*cosd(az))
LHA = atand2(-sind(az)*cosd(el)/cosd(DEC),
(sind(el)-sind(DEC)*sind(lat))/(cosd(DEC)*cosd(lat)))*(180/np.pi);
RA = np.mod(ThetaLST-LHA,360);
return RA,DEC
class data:
def __init__(self, *args, **kwargs):
'''
Read in data, t0 and t1 as tuples, e.g. (2016,5,3,0,0,0)
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if d.has_key('LO_freq'):
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
"""Zenith angle in degrees to airmass"""
return 1/cosd(x)
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
def P2T(self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError, 'Can\'t deal with non-uniform cal data yet.'
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk!=None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare
ind=np.where(ce<=ss)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
# Right now just make out DC and clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
|
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in enumerate(np.unique(self.za[ind])):
# Find contiguous blocks
doind=ind[np.where(self.za[ind]==val)]
dx=doind-np.roll(doind,1)
startind=np.where(dx!=1)[0]
endind=np.append(startind[1:],dx.size)
for l,val in enumerate(startind):
# For each contiguous block
s=doind[startind[l]]
e=doind[endind[l]-1]+1
x=self.t[s:e]
x=x-x.mean()
y=self.spec[s:e,:]
for m in range(self.nf):
# For each channel
yy=y[:,m];
if not np.any(np.isnan(yy)):
p=np.polyfit(x,yy,deg=deg); # Fit p3
z=np.poly1d(p)
yy=yy-z(x)
self.spec[s:e,m]=yy # replace data
def fitam(self,zarange=[20,50]):
"""Fit a line to P(am)"""
# Loop over scan blocks and fit each
m=np.zeros([self.nscan,self.nf]) # slope of P(agm) fit
b=np.zeros([self.nscan,self.nf]) # intercept of P(am) fit
q=np.zeros([self.nscan,self.nf]) # quadratic term
g=np.zeros([self.nscan,self.nf]) # gain
c=np.zeros([self.nscan,self.nf]) # mean of cal stare
Trx=np.zeros([self.nscan,self.nf]) # noise temperature
Th=290 # hot load
Tz=2 # Atmospheric zenith temp (not including CMB)
Tiso=2.7 # T above atmosphere
for k in range(self.nscan):
# Pull out scanning data for this block
ind=self.getscanind(k)
za=self.za[ind]
am=self.am[ind]
s=self.spec[ind,:]
# Find where elevation is in range
fitind=np.where((za>=zarange[0]) & (za<=zarange[1]))[0]
x=am[fitind]
y=s[fitind,:]
# Fit P(am) for each frequency
#for j in range(self.nf):
# yy=y[:,j]
# if not np.any(np.isnan(yy)):
#p=np.polyfit(x,yy,deg=1);
#m[k,j]=p[0]
#b[k,j]=p[1]
# Try to get gain
# Mean of lead/trail cal stare
c[k,:] = self.calccalmean(k)
cold=y.mean(0)
Tc = am.mean()*Tz + Tiso
for j in range(self.nf):
p=np.polyfit([Tc,Th],[cold[j],c[k,j]],deg=1)
g[k,j]=dc(p[0])
b[k,j]=dc(p[1])
# Noise temperature
Trx0=np.linspace(0,500,1000);
rhs = (Trx0+Th)/(Trx0+Tc)
for j in range(self.nf):
Ph=c[k,j] # hot load
Pc=b[k,j] # cold load
lhs = Ph/Pc
Trx[k,j]=np.interp(0,lhs-rhs,Trx0)
#g[k,:]=(c[k,:]-b[k,:])/(Th-Tc)
self.c = c
self.m = m
self.b = b
self.g = g
self.Trx=Trx
def expandcal(self):
"""Return an index array to expand an nscan x nf array of cal data to an array
of size self.spec (nt x nf).
ex. ind=expandcal(); ratio=self.scan/self.c[ind]
"""
ind=np.zeros(self.spec.shape[0]).astype(int)
for k in range(self.nscan):
ind[self.getscanind(k)]=k
ind[self.getcalind(k)]=k
return ind
def svd(self):
"""SVD filter"""
self.fspec=np.zeros(self.spec.shape)
self.u=[]
self.s=[]
self.v=[]
for k in range(self.nscan):
sind=self.getscanind(k)
x=self.spec[sind]
x[np.isnan(x)]=0
u,s,v=np.linalg.svd(x,full_matrices=True)
ss=dc(s)
ss[0]=0
SS=np.zeros(x.shape)
sz=x.shape[0]
SS[:sz,:sz]=np.diag(ss);
z=np.dot(u,np.dot(SS,v))
#for k in range(sz):
# y=z[k];ind=np.arange(500,1000);p=np.polyfit(self.f[ind],y[ind],deg=3);
# z[k]=y-np.poly1d(p)(self.f);
self.fspec[sind]=z
self.u.append(u)
self.s.append(s)
self.v.append(v)
return u,s,v,self.fspec
def atmgaincal(self,flim=[10000,10500]):
"""Gain cal on atmosphere"""
sind=self.getscanind()
ind=self.expandcal()
amm=np.tile(self.am,(self.nf,1)).T
x=(self.spec-self.b[ind])/self.g[ind];x=x[sind]
x=x/amm[sind]
y=x[ np.where((self.za[sind]>=25) & (self.za[sind]<=35))[0] ]
#for k in range(y.shape[0]):
# y[k]=y[k]-np.nanmedian(y[k])
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
for k in range(y.shape[0]):
x=self.f[find];
yy=y[k,find];
p=np.polyfit(x,yy,deg=1);
y[k]=y[k]-np.poly1d(p)(self.f)
return y
def wmean(self,y,flim=[10000,10500]):
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
w=np.tile(1/np.nanstd(y[:,find],axis=1)**2,(2048,1)).T;
ym=np.sum(y*w,axis=0)/np.sum(w,axis=0)
return ym
| p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x) | conditional_block |
reduc_spec.py | import in_out
import numpy as np
from copy import deepcopy as dc
from IPython.core.debugger import Tracer; debug_here=Tracer()
from matplotlib.pyplot import *
# Useful trig functions
def asind(x):
return np.arcsin(x)*180/np.pi
def sind(x):
return np.sin(x*np.pi/180)
def cosd(x):
return np.cos(x*np.pi/180)
def atand2(x,y):
return np.arctan2(x,y)*180/np.pi
#planck fxn lives here now
def planck(f, T):
h = 6.62606957e-34
c = 2.99792458e8
k = 1.3806488e-23
x = 8 * h * np.pi / c**3
y = f**3
ex = np.exp(h * f / (k * T)) - 1
return x * y / ex
def arr(x):
if type(x) is int:
x=np.array([x])
return x
def azel2radec(az,el,mjd,lat=47.8781,lon=-87.6298):
"""Convert az/del to ra/dec, Chicago lat/lon by default. Input/output in
degrees."""
T_UT1 = (mjd-51544.5)/36525;
ThetaGMST = 67310.54841 + (876600*3600 + 8640184.812866)*T_UT1 + \
.093104*(T_UT1**2) - (6.2e-6)*(T_UT1**3)
ThetaGMST = np.mod((np.mod(ThetaGMST,86400*(ThetaGMST/np.abs(ThetaGMST)))/240),360)
ThetaLST = ThetaGMST + lon
DEC = asind(sind(el)*sind(lat)+cosd(el)*cosd(lat)*cosd(az))
LHA = atand2(-sind(az)*cosd(el)/cosd(DEC),
(sind(el)-sind(DEC)*sind(lat))/(cosd(DEC)*cosd(lat)))*(180/np.pi);
RA = np.mod(ThetaLST-LHA,360);
return RA,DEC
class data:
def __init__(self, *args, **kwargs):
'''
Read in data, t0 and t1 as tuples, e.g. (2016,5,3,0,0,0)
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if d.has_key('LO_freq'):
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
|
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
def P2T(self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError, 'Can\'t deal with non-uniform cal data yet.'
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk!=None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare
ind=np.where(ce<=ss)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
# Right now just make out DC and clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x)
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in enumerate(np.unique(self.za[ind])):
# Find contiguous blocks
doind=ind[np.where(self.za[ind]==val)]
dx=doind-np.roll(doind,1)
startind=np.where(dx!=1)[0]
endind=np.append(startind[1:],dx.size)
for l,val in enumerate(startind):
# For each contiguous block
s=doind[startind[l]]
e=doind[endind[l]-1]+1
x=self.t[s:e]
x=x-x.mean()
y=self.spec[s:e,:]
for m in range(self.nf):
# For each channel
yy=y[:,m];
if not np.any(np.isnan(yy)):
p=np.polyfit(x,yy,deg=deg); # Fit p3
z=np.poly1d(p)
yy=yy-z(x)
self.spec[s:e,m]=yy # replace data
def fitam(self,zarange=[20,50]):
"""Fit a line to P(am)"""
# Loop over scan blocks and fit each
m=np.zeros([self.nscan,self.nf]) # slope of P(agm) fit
b=np.zeros([self.nscan,self.nf]) # intercept of P(am) fit
q=np.zeros([self.nscan,self.nf]) # quadratic term
g=np.zeros([self.nscan,self.nf]) # gain
c=np.zeros([self.nscan,self.nf]) # mean of cal stare
Trx=np.zeros([self.nscan,self.nf]) # noise temperature
Th=290 # hot load
Tz=2 # Atmospheric zenith temp (not including CMB)
Tiso=2.7 # T above atmosphere
for k in range(self.nscan):
# Pull out scanning data for this block
ind=self.getscanind(k)
za=self.za[ind]
am=self.am[ind]
s=self.spec[ind,:]
# Find where elevation is in range
fitind=np.where((za>=zarange[0]) & (za<=zarange[1]))[0]
x=am[fitind]
y=s[fitind,:]
# Fit P(am) for each frequency
#for j in range(self.nf):
# yy=y[:,j]
# if not np.any(np.isnan(yy)):
#p=np.polyfit(x,yy,deg=1);
#m[k,j]=p[0]
#b[k,j]=p[1]
# Try to get gain
# Mean of lead/trail cal stare
c[k,:] = self.calccalmean(k)
cold=y.mean(0)
Tc = am.mean()*Tz + Tiso
for j in range(self.nf):
p=np.polyfit([Tc,Th],[cold[j],c[k,j]],deg=1)
g[k,j]=dc(p[0])
b[k,j]=dc(p[1])
# Noise temperature
Trx0=np.linspace(0,500,1000);
rhs = (Trx0+Th)/(Trx0+Tc)
for j in range(self.nf):
Ph=c[k,j] # hot load
Pc=b[k,j] # cold load
lhs = Ph/Pc
Trx[k,j]=np.interp(0,lhs-rhs,Trx0)
#g[k,:]=(c[k,:]-b[k,:])/(Th-Tc)
self.c = c
self.m = m
self.b = b
self.g = g
self.Trx=Trx
def expandcal(self):
"""Return an index array to expand an nscan x nf array of cal data to an array
of size self.spec (nt x nf).
ex. ind=expandcal(); ratio=self.scan/self.c[ind]
"""
ind=np.zeros(self.spec.shape[0]).astype(int)
for k in range(self.nscan):
ind[self.getscanind(k)]=k
ind[self.getcalind(k)]=k
return ind
def svd(self):
"""SVD filter"""
self.fspec=np.zeros(self.spec.shape)
self.u=[]
self.s=[]
self.v=[]
for k in range(self.nscan):
sind=self.getscanind(k)
x=self.spec[sind]
x[np.isnan(x)]=0
u,s,v=np.linalg.svd(x,full_matrices=True)
ss=dc(s)
ss[0]=0
SS=np.zeros(x.shape)
sz=x.shape[0]
SS[:sz,:sz]=np.diag(ss);
z=np.dot(u,np.dot(SS,v))
#for k in range(sz):
# y=z[k];ind=np.arange(500,1000);p=np.polyfit(self.f[ind],y[ind],deg=3);
# z[k]=y-np.poly1d(p)(self.f);
self.fspec[sind]=z
self.u.append(u)
self.s.append(s)
self.v.append(v)
return u,s,v,self.fspec
def atmgaincal(self,flim=[10000,10500]):
"""Gain cal on atmosphere"""
sind=self.getscanind()
ind=self.expandcal()
amm=np.tile(self.am,(self.nf,1)).T
x=(self.spec-self.b[ind])/self.g[ind];x=x[sind]
x=x/amm[sind]
y=x[ np.where((self.za[sind]>=25) & (self.za[sind]<=35))[0] ]
#for k in range(y.shape[0]):
# y[k]=y[k]-np.nanmedian(y[k])
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
for k in range(y.shape[0]):
x=self.f[find];
yy=y[k,find];
p=np.polyfit(x,yy,deg=1);
y[k]=y[k]-np.poly1d(p)(self.f)
return y
def wmean(self,y,flim=[10000,10500]):
f=self.f+9500
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]
w=np.tile(1/np.nanstd(y[:,find],axis=1)**2,(2048,1)).T;
ym=np.sum(y*w,axis=0)/np.sum(w,axis=0)
return ym
| """Zenith angle in degrees to airmass"""
return 1/cosd(x) | identifier_body |
_ToolBar.ts | // Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License.txt in the project root for license information.
/// <reference path="../../Core.d.ts" />
import Animations = require("../../Animations");
import _Base = require("../../Core/_Base");
import _BaseUtils = require("../../Core/_BaseUtils");
import BindingList = require("../../BindingList");
import ControlProcessor = require("../../ControlProcessor");
import _Constants = require("../ToolBar/_Constants");
import _Command = require("../AppBar/_Command");
import _CommandingSurface = require("../CommandingSurface");
import _ICommandingSurface = require("../CommandingSurface/_CommandingSurface");
import _Control = require("../../Utilities/_Control");
import _Dispose = require("../../Utilities/_Dispose");
import _ElementUtilities = require("../../Utilities/_ElementUtilities");
import _ErrorFromName = require("../../Core/_ErrorFromName");
import _Events = require('../../Core/_Events');
import _Flyout = require("../../Controls/Flyout");
import _Global = require("../../Core/_Global");
import _Hoverable = require("../../Utilities/_Hoverable");
import _KeyboardBehavior = require("../../Utilities/_KeyboardBehavior");
import _LightDismissService = require('../../_LightDismissService');
import Menu = require("../../Controls/Menu");
import _MenuCommand = require("../Menu/_Command");
import Promise = require('../../Promise');
import _Resources = require("../../Core/_Resources");
import Scheduler = require("../../Scheduler");
import _OpenCloseMachine = require('../../Utilities/_OpenCloseMachine');
import _Signal = require('../../_Signal');
import _WinRT = require('../../Core/_WinRT');
import _WriteProfilerMark = require("../../Core/_WriteProfilerMark");
require(["require-style!less/styles-toolbar"]);
"use strict";
// The WinJS ToolBar is a specialized UI wrapper for the private _CommandingSurface UI component. The _CommandingSurface is responsible for rendering
// opened and closed states, knowing how to create the open and close animations, laying out commands, creating command hide/show animations and
// keyboard navigation across commands. The WinJS ToolBar is very similar to the WinJS AppBar, however the ToolBar is meant to be positioned in line
// with your app content whereas the AppBar is meant to overlay your app content.
//
// The responsibilities of the ToolBar include:
//
// - Seamlessly hosting the _CommandingSurface
// - From an end user perspective, there should be no visual distinction between where the ToolBar ends and the _CommandingSurface begins.
// - ToolBar wants to rely on the _CommandingSurface to do as much of the rendering as possible. The ToolBar relies on the _CommandingSurface to render its opened
// and closed states-- which defines the overall height of the ToolBar and CommandingSurface elements. The ToolBar has no policy or CSS styles regarding its own
// height and ToolBar takes advantage of the default behavior of its DIV element which is to always grow or shrink to match the height of its content.
// - From an end developer perspective, the _CommandingSurface should be abstracted as an implementation detail of the ToolBar as much as possible.
// - Developers should never have to interact with the CommandingSurface directly.The ToolBar exposes the majority of _CommandingSurface functionality through its
// own APIs
// - There are some HTML elements inside of the _CommandingSurface's DOM that a developer might like to style. After the _CommandingSurface has been instantiated
// and added to the ToolBar DOM, the ToolBar will inject its own "toolbar" specific class-names onto these elements to make them more discoverable to developers.
// - Example of developer styling guidelines https://msdn.microsoft.com/en-us/library/windows/apps/jj839733.asp
//
// - Open direction:
// - The ToolBar and its _CommandingSurface component can open upwards or downwards.Because there is no policy on where the ToolBar can be placed in an App, the ToolBar
// always wants to avoid opening in a direction that would cause any of its content to clip outside of the screen.
// - When the ToolBar is opening, it will always choose to expand in the direction(up or down) that currently has the most available space between the edge of the
// ToolBar element and the corresponding edge of the visual viewport.
// - This means that the a ToolBar near the bottom of the page will open upwards, but if the page is scrolled down such that the ToolBar is now near the top, the next
// time the ToolBar is opened it will open downwards.
//
// - Light dismiss
// - The ToolBar is a light dismissible when opened. This means that the ToolBar is closed thru a variety of cues such as tapping anywhere outside of it,
// pressing the escape key, and resizing the window.ToolBar relies on the _LightDismissService component for most of this functionality.
// The only pieces the ToolBar is responsible for are:
// - Describing what happens when a light dismiss is triggered on the ToolBar .
// - Describing how the ToolBar should take / restore focus when it becomes the topmost light dismissible in the light dismiss stack
// - Debugging Tip: Light dismiss can make debugging an opened ToolBar tricky.A good idea is to temporarily suspend the light dismiss cue that triggers when clicking
// outside of the current window.This can be achieved by executing the following code in the JavaScript console window: "WinJS.UI._LightDismissService._setDebug(true)"
//
// - Inline element when closed, overlay when opened:
// - The design of the toolbar called for it to be an control that developers can place inline with their other app content.When the ToolBar is closed it exists as a an
// element in your app, next to other app content and take up space in the flow of the document.
// - However, when the ToolBar opens, its vertical height will increase.Normally the change in height of an inline element will cause all of the other elements below the
// expanding element to move out of the way.Rather than push the rest of the app content down when opening, the design of the ToolBar called for it to overlay that content other content, while still taking up the same vertical space in the document as it did when closed.
// - The implementation of this feature is very complicated:
// - The only way one element can overlay another is to remove it from the flow of the document and give it a new CSS positioning like "absolute" or "fixed".
// - However, simply removing the ToolBar element from the document to make it an overlay, would leave behind a gap in the document that all the neighboring elements
// would try to fill by shifting over, leading to a jarring reflow of many elements whenever the ToolBar was opened.This was also undesirable
// - The final solution is as follows
// - Create a transparent placeholder element that is the exact same height and width as the closed ToolBar element.
// - Removing the ToolBar element from its place in the document while simultaneously inserting the placeholder element into the same spot the ToolBar element was
// just removed from.
// - Inserting the ToolBar element as a direct child of the body and giving it css position: fixed;
// We insert it directly into the body element because while opened, ToolBar is a Light dismissible overlay and is subject to the same stacking context pitfalls
// as any other light dismissible. https://github.com/winjs/winjs/wiki/Dismissables-and-Stacking-Contexts
// - Reposition the ToolBar element to be exactly overlaid on top of the placeholder element.
// - Render the ToolBar as opened, via the _CommandingSurface API, increasing the overall height of the ToolBar.
// - Closing the ToolBar is basically the same steps but in reverse.
// - One limitation to this implementation is that developers may not position the ToolBar element themselves directly via the CSS "position" or "float" properties.
// - This is because The ToolBar expects its element to be in the flow of the document when closed, and the placeholder element would not receive these same styles
// when inserted to replace the ToolBar element.
// - An easy workaround for developers is to wrap the ToolBar into another DIV element that they may style and position however they'd like.
//
// - Responding to the IHM:
// - If the ToolBar is opened when the IHM is shown, it will close itself.This is to avoid scenarios where the IHM totally occludes the opened ToolBar. If the ToolBar
// did not close itself, then the next mouse or touch input within the App wouldn't appear to do anything since it would just go to closing the light dismissible
// ToolBar anyway.
var strings = {
get ariaLabel() { return _Resources._getWinJSString("ui/toolbarAriaLabel").value; },
get overflowButtonAriaLabel() { return _Resources._getWinJSString("ui/toolbarOverflowButtonAriaLabel").value; },
get mustContainCommands() { return "The toolbar can only contain WinJS.UI.Command or WinJS.UI.AppBarCommand controls"; },
get duplicateConstruction() { return "Invalid argument: Controls may only be instantiated one time for each DOM element"; }
};
var ClosedDisplayMode = {
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.compact" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.compact">
/// When the ToolBar is closed, the height of the ToolBar is reduced such that button commands are still visible, but their labels are hidden.
/// </field>
compact: "compact",
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.full" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.full">
/// When the ToolBar is closed, the height of the ToolBar is always sized to content.
/// </field>
full: "full",
};
var closedDisplayModeClassMap = {};
closedDisplayModeClassMap[ClosedDisplayMode.compact] = _Constants.ClassNames.compactClass;
closedDisplayModeClassMap[ClosedDisplayMode.full] = _Constants.ClassNames.fullClass;
// Versions of add/removeClass that are no ops when called with falsy class names.
function addClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.addClass(element, className);
}
function removeClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.removeClass(element, className);
}
/// <field>
/// <summary locid="WinJS.UI.ToolBar">
/// Displays ICommands within the flow of the app. Use the ToolBar around other statically positioned app content.
/// </summary>
/// </field>
/// <icon src="ui_winjs.ui.toolbar.12x12.png" width="12" height="12" />
/// <icon src="ui_winjs.ui.toolbar.16x16.png" width="16" height="16" />
/// <htmlSnippet supportsContent="true"><![CDATA[<div data-win-control="WinJS.UI.ToolBar">
/// <button data-win-control="WinJS.UI.Command" data-win-options="{id:'',label:'example',icon:'back',type:'button',onclick:null,section:'primary'}"></button>
/// </div>]]></htmlSnippet>
/// <part name="toolbar" class="win-toolbar" locid="WinJS.UI.ToolBar_part:toolbar">The entire ToolBar control.</part>
/// <part name="toolbar-overflowbutton" class="win-toolbar-overflowbutton" locid="WinJS.UI.ToolBar_part:ToolBar-overflowbutton">The toolbar overflow button.</part>
/// <part name="toolbar-overflowarea" class="win-toolbar-overflowarea" locid="WinJS.UI.ToolBar_part:ToolBar-overflowarea">The container for toolbar commands that overflow.</part>
/// <resource type="javascript" src="//$(TARGET_DESTINATION)/js/WinJS.js" shared="true" />
/// <resource type="css" src="//$(TARGET_DESTINATION)/css/ui-dark.css" shared="true" />
export class ToolBar {
private _id: string;
private _disposed: boolean;
private _commandingSurface: _ICommandingSurface._CommandingSurface;
private _isOpenedMode: boolean;
private _handleShowingKeyboardBound: (ev: any) => void;
private _dismissable: _LightDismissService.LightDismissableElement;
private _cachedClosedHeight: number;
private _dom: {
root: HTMLElement;
commandingSurfaceEl: HTMLElement;
placeHolder: HTMLElement;
}
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode">
/// Display options for the actionarea when the ToolBar is closed.
/// </field>
static ClosedDisplayMode = ClosedDisplayMode;
static supportedForProcessing: boolean = true;
/// <field type="HTMLElement" domElement="true" hidden="true" locid="WinJS.UI.ToolBar.element" helpKeyword="WinJS.UI.ToolBar.element">
/// Gets the DOM element that hosts the ToolBar.
/// </field>
get element() {
return this._dom.root;
}
/// <field type="WinJS.Binding.List" locid="WinJS.UI.ToolBar.data" helpKeyword="WinJS.UI.ToolBar.data">
/// Gets or sets the Binding List of WinJS.UI.Command for the ToolBar.
/// </field>
get data() {
return this._commandingSurface.data;
}
set data(value: BindingList.List<_Command.ICommand>) {
this._commandingSurface.data = value;
}
/// <field type="String" locid="WinJS.UI.ToolBar.closedDisplayMode" helpKeyword="WinJS.UI.ToolBar.closedDisplayMode">
/// Gets or sets the closedDisplayMode for the ToolBar. Values are "compact" and "full".
/// </field>
get closedDisplayMode() {
return this._commandingSurface.closedDisplayMode;
}
set closedDisplayMode(value: string) {
if (ClosedDisplayMode[value]) {
this._commandingSurface.closedDisplayMode = value;
this._cachedClosedHeight = null;
}
}
/// <field type="Boolean" hidden="true" locid="WinJS.UI.ToolBar.opened" helpKeyword="WinJS.UI.ToolBar.opened">
/// Gets or sets whether the ToolBar is currently opened.
/// </field>
get opened(): boolean {
return this._commandingSurface.opened;
}
set opened(value: boolean) {
this._commandingSurface.opened = value;
}
constructor(element?: HTMLElement, options: any = {}) {
/// <signature helpKeyword="WinJS.UI.ToolBar.ToolBar">
/// <summary locid="WinJS.UI.ToolBar.constructor">
/// Creates a new ToolBar control.
/// </summary>
/// <param name="element" type="HTMLElement" domElement="true" locid="WinJS.UI.ToolBar.constructor_p:element">
/// The DOM element that will host the control.
/// </param>
/// <param name="options" type="Object" locid="WinJS.UI.ToolBar.constructor_p:options">
/// The set of properties and values to apply to the new ToolBar control.
/// </param>
/// <returns type="WinJS.UI.ToolBar" locid="WinJS.UI.ToolBar.constructor_returnValue">
/// The new ToolBar control.
/// </returns>
/// </signature>
this._writeProfilerMark("constructor,StartTM");
// Check to make sure we weren't duplicated
if (element && element["winControl"]) {
throw new _ErrorFromName("WinJS.UI.ToolBar.DuplicateConstruction", strings.duplicateConstruction);
}
this._initializeDom(element || _Global.document.createElement("div"));
// The OpenCloseMachine coordinates open/close requests and drives the
// animations below; events are dispatched on this.element.
var stateMachine = new _OpenCloseMachine.OpenCloseMachine({
eventElement: this.element,
onOpen: () => {
// Build the animation while still closed, render opened, then play it.
var openAnimation = this._commandingSurface.createOpenAnimation(this._getClosedHeight());
this._synchronousOpen();
return openAnimation.execute();
},
onClose: () => {
// Play the close animation first; render the closed state only afterwards.
var closeAnimation = this._commandingSurface.createCloseAnimation(this._getClosedHeight());
return closeAnimation.execute().then(() => {
this._synchronousClose();
});
},
onUpdateDom: () => {
this._updateDomImpl();
},
onUpdateDomWithIsOpened: (isOpened: boolean) => {
this._isOpenedMode = isOpened;
this._updateDomImpl();
}
});
// Events
this._handleShowingKeyboardBound = this._handleShowingKeyboard.bind(this);
_ElementUtilities._inputPaneListener.addEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
// Initialize private state.
this._disposed = false;
this._cachedClosedHeight = null;
this._commandingSurface = new _CommandingSurface._CommandingSurface(this._dom.commandingSurfaceEl, { openCloseMachine: stateMachine });
// Tag the commanding surface's internals with ToolBar-specific class names so
// developers can style them without knowing about _CommandingSurface.
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-actionarea"), _Constants.ClassNames.actionAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowarea"), _Constants.ClassNames.overflowAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowbutton"), _Constants.ClassNames.overflowButtonCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-ellipsis"), _Constants.ClassNames.ellipsisCssClass);
this._isOpenedMode = _Constants.defaultOpened;
// While opened, the ToolBar is a light dismissible; describe how it closes on
// dismiss and how it takes focus when topmost in the dismiss stack.
this._dismissable = new _LightDismissService.LightDismissableElement({
element: this._dom.root,
tabIndex: this._dom.root.hasAttribute("tabIndex") ? this._dom.root.tabIndex : -1,
onLightDismiss: () => {
this.close();
},
onTakeFocus: (useSetActive) => {
this._dismissable.restoreFocus() ||
this._commandingSurface.takeFocus(useSetActive);
}
});
// Initialize public properties.
this.closedDisplayMode = _Constants.defaultClosedDisplayMode;
this.opened = this._isOpenedMode;
_Control.setOptions(this, options);
// Exit the Init state.
// Wait for the element to be in the DOM and for the commanding surface to
// finish initializing before the state machine accepts open/close requests.
_ElementUtilities._inDom(this.element).then(() => {
return this._commandingSurface.initialized;
}).then(() => {
stateMachine.exitInit();
this._writeProfilerMark("constructor,StopTM");
});
}
// Event-handler properties. The backing event plumbing (addEventListener etc.)
// is supplied by the _Events.createEventProperties mixin applied after the class.
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeopen" helpKeyword="WinJS.UI.ToolBar.onbeforeopen">
/// Occurs immediately before the control is opened. Is cancelable.
/// </field>
onbeforeopen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafteropen" helpKeyword="WinJS.UI.ToolBar.onafteropen">
/// Occurs immediately after the control is opened.
/// </field>
onafteropen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeclose" helpKeyword="WinJS.UI.ToolBar.onbeforeclose">
/// Occurs immediately before the control is closed. Is cancelable.
/// </field>
onbeforeclose: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafterclose" helpKeyword="WinJS.UI.ToolBar.onafterclose">
/// Occurs immediately after the control is closed.
/// </field>
onafterclose: (ev: CustomEvent) => void;
open(): void {
    /// <signature helpKeyword="WinJS.UI.ToolBar.open">
    /// <summary locid="WinJS.UI.ToolBar.open">
    /// Opens the ToolBar
    /// </summary>
    /// </signature>
    // The commanding surface's state machine owns opening (events + animation).
    var surface = this._commandingSurface;
    surface.open();
}
close(): void {
    /// <signature helpKeyword="WinJS.UI.ToolBar.close">
    /// <summary locid="WinJS.UI.ToolBar.close">
    /// Closes the ToolBar
    /// </summary>
    /// </signature>
    // The commanding surface's state machine owns closing (events + animation).
    var surface = this._commandingSurface;
    surface.close();
}
dispose() {
/// <signature helpKeyword="WinJS.UI.ToolBar.dispose">
/// <summary locid="WinJS.UI.ToolBar.dispose">
/// Disposes this ToolBar.
/// </summary>
/// </signature>
if (this._disposed) {
return;
}
this._disposed = true;
// Remove this ToolBar from the light-dismiss stack before tearing down.
_LightDismissService.hidden(this._dismissable);
// Disposing the _commandingSurface will trigger dispose on its OpenCloseMachine and synchronously complete any animations that might have been running.
this._commandingSurface.dispose();
// If page navigation is happening, we don't want the ToolBar left behind in the body.
// Synchronously close the ToolBar to force it out of the body and back into its parent element.
this._synchronousClose();
_ElementUtilities._inputPaneListener.removeEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
_Dispose.disposeSubTree(this.element);
}
forceLayout() {
    /// <signature helpKeyword="WinJS.UI.ToolBar.forceLayout">
    /// <summary locid="WinJS.UI.ToolBar.forceLayout">
    /// Forces the ToolBar to update its layout. Use this function when the window did not change size, but the container of the ToolBar changed size.
    /// </summary>
    /// </signature>
    // Layout is entirely delegated to the hosted commanding surface.
    var surface = this._commandingSurface;
    surface.forceLayout();
}
getCommandById(id: string): _Command.ICommand {
    /// <signature helpKeyword="WinJS.UI.ToolBar.getCommandById">
    /// <summary locid="WinJS.UI.ToolBar.getCommandById">
    /// Retrieves the command with the specified ID from this ToolBar.
    /// If more than one command is found, this method returns the first command found.
    /// </summary>
    /// <param name="id" type="String" locid="WinJS.UI.ToolBar.getCommandById_p:id">Id of the command to return.</param>
    /// <returns type="object" locid="WinJS.UI.ToolBar.getCommandById_returnValue">
    /// The command found, or null if no command is found.
    /// </returns>
    /// </signature>
    // Command lookup is delegated to the commanding surface, which owns the data.
    var command = this._commandingSurface.getCommandById(id);
    return command;
}
showOnlyCommands(commands: Array<string|_Command.ICommand>): void {
    /// <signature helpKeyword="WinJS.UI.ToolBar.showOnlyCommands">
    /// <summary locid="WinJS.UI.ToolBar.showOnlyCommands">
    /// Show the specified commands, hiding all of the others in the ToolBar.
    /// </summary>
    /// <param name="commands" type="Array" locid="WinJS.UI.ToolBar.showOnlyCommands_p:commands">
    /// An array of the commands to show. The array elements may be Command objects, or the string identifiers (IDs) of commands.
    /// </param>
    /// </signature>
    // Command visibility is delegated to the commanding surface.
    var surface = this._commandingSurface;
    return surface.showOnlyCommands(commands);
}
private _writeProfilerMark(text: string) {
    // Prefix marks with the control type and instance id so traces can be
    // filtered per control; produces "WinJS.UI.ToolBar:<id>:<text>".
    _WriteProfilerMark(["WinJS.UI.ToolBar", this._id, text].join(":"));
}
private _initializeDom(root: HTMLElement): void {
// Builds the ToolBar's DOM: tags the root element, reparents any declarative
// commands into the commanding-surface host element, and prepares the
// placeholder used while the ToolBar is opened as an overlay.
this._writeProfilerMark("_intializeDom,info");
// NOTE(review): "_intializeDom" above is misspelled, but is left as-is since
// profiler trace consumers may match this exact mark string.
// Attaching JS control to DOM element
root["winControl"] = this;
this._id = root.id || _ElementUtilities._uniqueID(root);
_ElementUtilities.addClass(root, _Constants.ClassNames.controlCssClass);
_ElementUtilities.addClass(root, _Constants.ClassNames.disposableCssClass);
// Make sure we have an ARIA role
var role = root.getAttribute("role");
if (!role) {
root.setAttribute("role", "menubar");
}
var label = root.getAttribute("aria-label");
if (!label) {
root.setAttribute("aria-label", strings.ariaLabel);
}
// Create element for commandingSurface and reparent any declarative Commands.
// The CommandingSurface constructor will parse child elements as AppBarCommands.
var commandingSurfaceEl = document.createElement("DIV");
_ElementUtilities._reparentChildren(root, commandingSurfaceEl);
root.appendChild(commandingSurfaceEl);
// While the ToolBar is open, it will place itself in the <body> so it can become a light dismissible
// overlay. It leaves the placeHolder element behind as stand in at the ToolBar's original DOM location
// to avoid reflowing surrounding app content and create the illusion that the ToolBar hasn't moved along
// the x or y planes.
var placeHolder = _Global.document.createElement("DIV");
_ElementUtilities.addClass(placeHolder, _Constants.ClassNames.placeHolderCssClass);
// If the ToolBar's original HTML parent node is disposed while the ToolBar is open and repositioned as
// a temporary child of the <body>, make sure that calling dispose on the placeHolder element will trigger
// dispose on the ToolBar as well.
_Dispose.markDisposable(placeHolder, this.dispose.bind(this));
this._dom = {
root: root,
commandingSurfaceEl: commandingSurfaceEl,
placeHolder: placeHolder,
};
}
// Input pane ("showing") handler: closes the ToolBar when the on-screen
// keyboard appears. Bound once in the constructor via _handleShowingKeyboardBound.
private _handleShowingKeyboard(event: { detail: { originalEvent: _WinRT.Windows.UI.ViewManagement.InputPaneVisibilityEventArgs } }) {
// Because the ToolBar takes up layout space and is not an overlay, it doesn't have the same expectation
// to move itself to get out of the way of a showing IHM. Instead we just close the ToolBar to avoid
// scenarios where the ToolBar is occluded, but the click-eating-div is still present since it may seem
// strange to end users that an occluded ToolBar (out of sight, out of mind) is still eating their first
// click.
// Mitigation:
// Because (1) custom content in a ToolBar can only be included as a 'content' type command, because (2)
// the ToolBar only supports closedDisplayModes 'compact' and 'full', and because (3) 'content' type
// commands in the overflowarea use a separate contentflyout to display their contents:
// Interactable custom content contained within the ToolBar actionarea or overflowarea, will remain
// visible and interactable even when showing the IHM closes the ToolBar.
this.close();
}
// Renders the opened state immediately, bypassing animations and events.
private _synchronousOpen(): void {
this._isOpenedMode = true;
this._updateDomImpl();
}
// Renders the closed state immediately, bypassing animations and events.
private _synchronousClose(): void {
this._isOpenedMode = false;
this._updateDomImpl();
}
// State private to the _updateDomImpl family of method. No other methods should make use of it.
//
// Nothing has been rendered yet so these are all initialized to undefined. Because
// they are undefined, the first time _updateDomImpl is called, they will all be
// rendered.
private _updateDomImpl_renderedState = {
isOpenedMode: <boolean>undefined,
closedDisplayMode: <string>undefined,
// Inline width of the root before opening; restored on close.
prevInlineWidth: <string>undefined,
};
private _updateDomImpl(): void {
    // Diff the last-rendered state against the current state and patch only
    // the pieces that changed, then let the commanding surface update its DOM.
    var prev = this._updateDomImpl_renderedState;

    var wantOpened = this._isOpenedMode;
    if (prev.isOpenedMode !== wantOpened) {
        wantOpened ? this._updateDomImpl_renderOpened() : this._updateDomImpl_renderClosed();
        prev.isOpenedMode = wantOpened;
    }

    var wantDisplayMode = this.closedDisplayMode;
    if (prev.closedDisplayMode !== wantDisplayMode) {
        // Swap the CSS class that corresponds to the closed display mode.
        removeClass(this._dom.root, closedDisplayModeClassMap[prev.closedDisplayMode]);
        addClass(this._dom.root, closedDisplayModeClassMap[wantDisplayMode]);
        prev.closedDisplayMode = wantDisplayMode;
    }

    this._commandingSurface.updateDom();
}
private _getClosedHeight(): number {
    // Returns the pixel height of the ToolBar's closed state, measured lazily
    // and cached. The cache is invalidated when closedDisplayMode changes
    // (see the closedDisplayMode setter, which nulls _cachedClosedHeight).
    //
    // Fix: the body of the `if (this._isOpenedMode)` branch was missing
    // (truncated source); without synchronously closing first, the measurement
    // below would capture the *opened* height while the ToolBar is open.
    if (this._cachedClosedHeight === null) {
        var wasOpen = this._isOpenedMode;
        if (this._isOpenedMode) {
            // Temporarily render the closed state (no animation) so we measure
            // the closed bounding rect.
            this._synchronousClose();
        }
        this._cachedClosedHeight = this._commandingSurface.getBoundingRects().commandingSurface.height;
        if (wasOpen) {
            // Restore the opened state we interrupted.
            this._synchronousOpen();
        }
    }
    return this._cachedClosedHeight;
}
// Renders the opened state: measures the closed ToolBar, swaps in a same-size
// placeholder at its DOM location, moves the root into <body> positioned over
// the placeholder, picks an overflow direction, and registers light dismiss.
private _updateDomImpl_renderOpened(): void {
// Measure closed state.
this._updateDomImpl_renderedState.prevInlineWidth = this._dom.root.style.width;
var closedBorderBox = this._dom.root.getBoundingClientRect();
var closedContentWidth = _ElementUtilities._getPreciseContentWidth(this._dom.root);
var closedContentHeight = _ElementUtilities._getPreciseContentHeight(this._dom.root);
var closedStyle = _ElementUtilities._getComputedStyle(this._dom.root);
var closedPaddingTop = _ElementUtilities._convertToPrecisePixels(closedStyle.paddingTop);
var closedBorderTop = _ElementUtilities._convertToPrecisePixels(closedStyle.borderTopWidth);
var closedMargins = _ElementUtilities._getPreciseMargins(this._dom.root);
var closedContentBoxTop = closedBorderBox.top + closedBorderTop + closedPaddingTop;
var closedContentBoxBottom = closedContentBoxTop + closedContentHeight;
// Size our placeHolder. Set height and width to match borderbox of the closed ToolBar.
// Copy ToolBar margins to the placeholder.
var placeHolder = this._dom.placeHolder;
var placeHolderStyle = placeHolder.style;
placeHolderStyle.width = closedBorderBox.width + "px";
placeHolderStyle.height = closedBorderBox.height + "px";
placeHolderStyle.marginTop = closedMargins.top + "px";
placeHolderStyle.marginRight = closedMargins.right + "px";
placeHolderStyle.marginBottom = closedMargins.bottom + "px";
placeHolderStyle.marginLeft = closedMargins.left + "px";
_ElementUtilities._maintainFocus(() => {
// Move ToolBar element to the body in preparation of becoming a light dismissible. Leave an equal sized placeHolder element
// at our original DOM location to avoid reflowing surrounding app content.
this._dom.root.parentElement.insertBefore(placeHolder, this._dom.root);
_Global.document.body.appendChild(this._dom.root);
// Position the ToolBar to completely cover the same region as the placeholder element.
this._dom.root.style.width = closedContentWidth + "px";
this._dom.root.style.left = closedBorderBox.left - closedMargins.left + "px";
// Determine which direction to expand the CommandingSurface elements when opened. The overflow area will be rendered at the corresponding edge of
// the ToolBar's content box, so we choose the direction that offers the most space between that edge and the corresponding edge of the viewport.
// This is to reduce the chance that the overflow area might clip through the edge of the viewport.
var topOfViewport = 0;
var bottomOfViewport = _Global.innerHeight;
var distanceFromTop = closedContentBoxTop - topOfViewport;
var distanceFromBottom = bottomOfViewport - closedContentBoxBottom;
if (distanceFromTop > distanceFromBottom) {
// CommandingSurface is going to expand upwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.top;
// Position the bottom edge of the ToolBar marginbox over the bottom edge of the placeholder marginbox.
this._dom.root.style.bottom = (bottomOfViewport - closedBorderBox.bottom) - closedMargins.bottom + "px";
} else {
// CommandingSurface is going to expand downwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.bottom;
// Position the top edge of the ToolBar marginbox over the top edge of the placeholder marginbox.
this._dom.root.style.top = (topOfViewport + closedBorderBox.top) - closedMargins.top + "px";
}
// Render opened state
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.openedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.closedClass);
});
this._commandingSurface.synchronousOpen();
_LightDismissService.shown(this._dismissable); // Call at the start of the open animation
}
// Reverses _updateDomImpl_renderOpened: returns the root element from <body>
// back to the placeholder's position, removes the placeholder, and restores
// the inline styles set while opened.
private _updateDomImpl_renderClosed(): void {
_ElementUtilities._maintainFocus(() => {
if (this._dom.placeHolder.parentElement) {
// Restore our placement in the DOM
var placeHolder = this._dom.placeHolder;
placeHolder.parentElement.insertBefore(this._dom.root, placeHolder);
placeHolder.parentElement.removeChild(placeHolder);
}
// Render Closed
this._dom.root.style.top = "";
this._dom.root.style.right = "";
this._dom.root.style.bottom = "";
this._dom.root.style.left = "";
this._dom.root.style.width = this._updateDomImpl_renderedState.prevInlineWidth;
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.closedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.openedClass);
});
this._commandingSurface.synchronousClose();
_LightDismissService.hidden(this._dismissable); // Call after the close animation
}
}
// Realize the on{before,after}{open,close} event properties declared on the class.
_Base.Class.mix(ToolBar, _Events.createEventProperties(
_Constants.EventNames.beforeOpen,
_Constants.EventNames.afterOpen,
_Constants.EventNames.beforeClose,
_Constants.EventNames.afterClose));
// addEventListener, removeEventListener, dispatchEvent
_Base.Class.mix(ToolBar, _Control.DOMEventMixin);
| {
this._synchronousClose();
} | conditional_block |
_ToolBar.ts | // Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License.txt in the project root for license information.
/// <reference path="../../Core.d.ts" />
import Animations = require("../../Animations");
import _Base = require("../../Core/_Base");
import _BaseUtils = require("../../Core/_BaseUtils");
import BindingList = require("../../BindingList");
import ControlProcessor = require("../../ControlProcessor");
import _Constants = require("../ToolBar/_Constants");
import _Command = require("../AppBar/_Command");
import _CommandingSurface = require("../CommandingSurface");
import _ICommandingSurface = require("../CommandingSurface/_CommandingSurface");
import _Control = require("../../Utilities/_Control");
import _Dispose = require("../../Utilities/_Dispose");
import _ElementUtilities = require("../../Utilities/_ElementUtilities");
import _ErrorFromName = require("../../Core/_ErrorFromName");
import _Events = require('../../Core/_Events');
import _Flyout = require("../../Controls/Flyout");
import _Global = require("../../Core/_Global");
import _Hoverable = require("../../Utilities/_Hoverable");
import _KeyboardBehavior = require("../../Utilities/_KeyboardBehavior");
import _LightDismissService = require('../../_LightDismissService');
import Menu = require("../../Controls/Menu");
import _MenuCommand = require("../Menu/_Command");
import Promise = require('../../Promise');
import _Resources = require("../../Core/_Resources");
import Scheduler = require("../../Scheduler");
import _OpenCloseMachine = require('../../Utilities/_OpenCloseMachine');
import _Signal = require('../../_Signal');
import _WinRT = require('../../Core/_WinRT');
import _WriteProfilerMark = require("../../Core/_WriteProfilerMark");
require(["require-style!less/styles-toolbar"]);
"use strict";
// The WinJS ToolBar is a specialized UI wrapper for the private _CommandingSurface UI component. The _CommandingSurface is responsible for rendering
// opened and closed states, knowing how to create the open and close animations, laying out commands, creating command hide/show animations and
// keyboard navigation across commands. The WinJS ToolBar is very similar to the WinJS AppBar, however the ToolBar is meant to be positioned in line
// with your app content whereas the AppBar is meant to overlay your app content.
//
// The responsibilities of the ToolBar include:
//
// - Seamlessly hosting the _CommandingSurface
// - From an end user perspective, there should be no visual distinction between where the ToolBar ends and the _CommandingSurface begins.
// - ToolBar wants to rely on the _CommandingSurface to do as much of the rendering as possible. The ToolBar relies on the _CommandingSurface to render its opened
// and closed states-- which defines the overall height of the ToolBar and CommandingSurface elements. The ToolBar has no policy or CSS styles regarding its own
// height and ToolBar takes advantage of the default behavior of its DIV element which is to always grow or shrink to match the height of its content.
// - From an end developer perspective, the _CommandingSurface should be abstracted as an implementation detail of the ToolBar as much as possible.
// - Developers should never have to interact with the CommandingSurface directly.The ToolBar exposes the majority of _CommandingSurface functionality through its
// own APIs
// - There are some HTML elements inside of the _CommandingSurface's DOM that a developer might like to style. After the _CommandingSurface has been instantiated
// and added to the ToolBar DOM, the ToolBar will inject its own "toolbar" specific class-names onto these elements to make them more discoverable to developers.
// - Example of developer styling guidelines https://msdn.microsoft.com/en-us/library/windows/apps/jj839733.asp
//
// - Open direction:
// - The ToolBar and its _CommandingSurface component can open upwards or downwards.Because there is no policy on where the ToolBar can be placed in an App, the ToolBar
// always wants to avoid opening in a direction that would cause any of its content to clip outside of the screen.
// - When the ToolBar is opening, it will always choose to expand in the direction(up or down) that currently has the most available space between the edge of the
// ToolBar element and the corresponding edge of the visual viewport.
// - This means that a ToolBar near the bottom of the page will open upwards, but if the page is scrolled down such that the ToolBar is now near the top, the next
// time the ToolBar is opened it will open downwards.
//
// - Light dismiss
// - The ToolBar is a light dismissible when opened. This means that the ToolBar is closed thru a variety of cues such as tapping anywhere outside of it,
// pressing the escape key, and resizing the window.ToolBar relies on the _LightDismissService component for most of this functionality.
// The only pieces the ToolBar is responsible for are:
// - Describing what happens when a light dismiss is triggered on the ToolBar .
// - Describing how the ToolBar should take / restore focus when it becomes the topmost light dismissible in the light dismiss stack
// - Debugging Tip: Light dismiss can make debugging an opened ToolBar tricky.A good idea is to temporarily suspend the light dismiss cue that triggers when clicking
// outside of the current window.This can be achieved by executing the following code in the JavaScript console window: "WinJS.UI._LightDismissService._setDebug(true)"
//
// - Inline element when closed, overlay when opened:
// - The design of the toolbar called for it to be a control that developers can place inline with their other app content. When the ToolBar is closed it exists as an
// element in your app, next to other app content, and takes up space in the flow of the document.
// - However, when the ToolBar opens, its vertical height will increase.Normally the change in height of an inline element will cause all of the other elements below the
// expanding element to move out of the way. Rather than push the rest of the app content down when opening, the design of the ToolBar called for it to overlay that other content, while still taking up the same vertical space in the document as it did when closed.
// - The implementation of this feature is very complicated:
// - The only way one element can overlay another is to remove it from the flow of the document and give it a new CSS positioning like "absolute" or "fixed".
// - However, simply removing the ToolBar element from the document to make it an overlay, would leave behind a gap in the document that all the neighboring elements
// would try to fill by shifting over, leading to a jarring reflow of many elements whenever the ToolBar was opened.This was also undesirable
// - The final solution is as follows
// - Create a transparent placeholder element that is the exact same height and width as the closed ToolBar element.
// - Removing the ToolBar element from its place in the document while simultaneously inserting the placeholder element into the same spot the ToolBar element was
// just removed from.
// - Inserting the ToolBar element as a direct child of the body and giving it css position: fixed;
// We insert it directly into the body element because while opened, ToolBar is a Light dismissible overlay and is subject to the same stacking context pitfalls
// as any other light dismissible. https://github.com/winjs/winjs/wiki/Dismissables-and-Stacking-Contexts
// - Reposition the ToolBar element to be exactly overlaid on top of the placeholder element.
// - Render the ToolBar as opened, via the _CommandingSurface API, increasing the overall height of the ToolBar.
// - Closing the ToolBar is basically the same steps but in reverse.
// - One limitation to this implementation is that developers may not position the ToolBar element themselves directly via the CSS "position" or "float" properties.
// - This is because The ToolBar expects its element to be in the flow of the document when closed, and the placeholder element would not receive these same styles
// when inserted to replace the ToolBar element.
// - An easy workaround for developers is to wrap the ToolBar into another DIV element that they may style and position however they'd like.
//
// - Responding to the IHM:
// - If the ToolBar is opened when the IHM is shown, it will close itself.This is to avoid scenarios where the IHM totally occludes the opened ToolBar. If the ToolBar
// did not close itself, then the next mouse or touch input within the App wouldn't appear to do anything since it would just go to closing the light dismissible
// ToolBar anyway.
// Localized/user-facing strings, exposed via getters so resource lookup is
// deferred until first use.
var strings = {
get ariaLabel() { return _Resources._getWinJSString("ui/toolbarAriaLabel").value; },
get overflowButtonAriaLabel() { return _Resources._getWinJSString("ui/toolbarOverflowButtonAriaLabel").value; },
get mustContainCommands() { return "The toolbar can only contain WinJS.UI.Command or WinJS.UI.AppBarCommand controls"; },
get duplicateConstruction() { return "Invalid argument: Controls may only be instantiated one time for each DOM element"; }
};
// Allowed values for the closedDisplayMode property; exposed publicly as the
// static ToolBar.ClosedDisplayMode.
var ClosedDisplayMode = {
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.compact" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.compact">
/// When the ToolBar is closed, the height of the ToolBar is reduced such that button commands are still visible, but their labels are hidden.
/// </field>
compact: "compact",
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.full" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.full">
/// When the ToolBar is closed, the height of the ToolBar is always sized to content.
/// </field>
full: "full",
};
// Maps each ClosedDisplayMode value to the CSS class that renders it.
var closedDisplayModeClassMap = {};
closedDisplayModeClassMap[ClosedDisplayMode.compact] = _Constants.ClassNames.compactClass;
closedDisplayModeClassMap[ClosedDisplayMode.full] = _Constants.ClassNames.fullClass;
// Versions of add/removeClass that are no ops when called with falsy class names.
// Adds className to element; a no-op when className is falsy (some class-name
// constants may be empty).
function addClass(element: HTMLElement, className: string): void {
    if (className) {
        _ElementUtilities.addClass(element, className);
    }
}
// Removes className from element; a no-op when className is falsy.
function removeClass(element: HTMLElement, className: string): void {
    if (className) {
        _ElementUtilities.removeClass(element, className);
    }
}
/// <field>
/// <summary locid="WinJS.UI.ToolBar">
/// Displays ICommands within the flow of the app. Use the ToolBar around other statically positioned app content.
/// </summary>
/// </field>
/// <icon src="ui_winjs.ui.toolbar.12x12.png" width="12" height="12" />
/// <icon src="ui_winjs.ui.toolbar.16x16.png" width="16" height="16" />
/// <htmlSnippet supportsContent="true"><![CDATA[<div data-win-control="WinJS.UI.ToolBar">
/// <button data-win-control="WinJS.UI.Command" data-win-options="{id:'',label:'example',icon:'back',type:'button',onclick:null,section:'primary'}"></button>
/// </div>]]></htmlSnippet>
/// <part name="toolbar" class="win-toolbar" locid="WinJS.UI.ToolBar_part:toolbar">The entire ToolBar control.</part>
/// <part name="toolbar-overflowbutton" class="win-toolbar-overflowbutton" locid="WinJS.UI.ToolBar_part:ToolBar-overflowbutton">The toolbar overflow button.</part>
/// <part name="toolbar-overflowarea" class="win-toolbar-overflowarea" locid="WinJS.UI.ToolBar_part:ToolBar-overflowarea">The container for toolbar commands that overflow.</part>
/// <resource type="javascript" src="//$(TARGET_DESTINATION)/js/WinJS.js" shared="true" />
/// <resource type="css" src="//$(TARGET_DESTINATION)/css/ui-dark.css" shared="true" />
export class ToolBar {
private _id: string;
private _disposed: boolean;
private _commandingSurface: _ICommandingSurface._CommandingSurface;
private _isOpenedMode: boolean;
private _handleShowingKeyboardBound: (ev: any) => void;
private _dismissable: _LightDismissService.LightDismissableElement;
private _cachedClosedHeight: number;
private _dom: {
root: HTMLElement;
commandingSurfaceEl: HTMLElement;
placeHolder: HTMLElement;
}
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode">
/// Display options for the actionarea when the ToolBar is closed.
/// </field>
static ClosedDisplayMode = ClosedDisplayMode;
static supportedForProcessing: boolean = true;
/// <field type="HTMLElement" domElement="true" hidden="true" locid="WinJS.UI.ToolBar.element" helpKeyword="WinJS.UI.ToolBar.element">
/// Gets the DOM element that hosts the ToolBar.
/// </field>
get element() |
/// <field type="WinJS.Binding.List" locid="WinJS.UI.ToolBar.data" helpKeyword="WinJS.UI.ToolBar.data">
/// Gets or sets the Binding List of WinJS.UI.Command for the ToolBar.
/// </field>
get data() {
return this._commandingSurface.data;
}
set data(value: BindingList.List<_Command.ICommand>) {
this._commandingSurface.data = value;
}
/// <field type="String" locid="WinJS.UI.ToolBar.closedDisplayMode" helpKeyword="WinJS.UI.ToolBar.closedDisplayMode">
/// Gets or sets the closedDisplayMode for the ToolBar. Values are "compact" and "full".
/// </field>
get closedDisplayMode() {
return this._commandingSurface.closedDisplayMode;
}
set closedDisplayMode(value: string) {
// Silently ignore values that are not valid ClosedDisplayMode keys.
if (ClosedDisplayMode[value]) {
this._commandingSurface.closedDisplayMode = value;
// The closed height depends on the display mode; drop the cached measurement.
this._cachedClosedHeight = null;
}
}
/// <field type="Boolean" hidden="true" locid="WinJS.UI.ToolBar.opened" helpKeyword="WinJS.UI.ToolBar.opened">
/// Gets or sets whether the ToolBar is currently opened.
/// </field>
get opened(): boolean {
return this._commandingSurface.opened;
}
set opened(value: boolean) {
this._commandingSurface.opened = value;
}
constructor(element?: HTMLElement, options: any = {}) {
/// <signature helpKeyword="WinJS.UI.ToolBar.ToolBar">
/// <summary locid="WinJS.UI.ToolBar.constructor">
/// Creates a new ToolBar control.
/// </summary>
/// <param name="element" type="HTMLElement" domElement="true" locid="WinJS.UI.ToolBar.constructor_p:element">
/// The DOM element that will host the control.
/// </param>
/// <param name="options" type="Object" locid="WinJS.UI.ToolBar.constructor_p:options">
/// The set of properties and values to apply to the new ToolBar control.
/// </param>
/// <returns type="WinJS.UI.ToolBar" locid="WinJS.UI.ToolBar.constructor_returnValue">
/// The new ToolBar control.
/// </returns>
/// </signature>
this._writeProfilerMark("constructor,StartTM");
// Check to make sure we weren't duplicated
if (element && element["winControl"]) {
throw new _ErrorFromName("WinJS.UI.ToolBar.DuplicateConstruction", strings.duplicateConstruction);
}
this._initializeDom(element || _Global.document.createElement("div"));
// The OpenCloseMachine coordinates open/close requests and drives the
// animations below; events are dispatched on this.element.
var stateMachine = new _OpenCloseMachine.OpenCloseMachine({
eventElement: this.element,
onOpen: () => {
// Build the animation while still closed, render opened, then play it.
var openAnimation = this._commandingSurface.createOpenAnimation(this._getClosedHeight());
this._synchronousOpen();
return openAnimation.execute();
},
onClose: () => {
// Play the close animation first; render the closed state only afterwards.
var closeAnimation = this._commandingSurface.createCloseAnimation(this._getClosedHeight());
return closeAnimation.execute().then(() => {
this._synchronousClose();
});
},
onUpdateDom: () => {
this._updateDomImpl();
},
onUpdateDomWithIsOpened: (isOpened: boolean) => {
this._isOpenedMode = isOpened;
this._updateDomImpl();
}
});
// Events
this._handleShowingKeyboardBound = this._handleShowingKeyboard.bind(this);
_ElementUtilities._inputPaneListener.addEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
// Initialize private state.
this._disposed = false;
this._cachedClosedHeight = null;
this._commandingSurface = new _CommandingSurface._CommandingSurface(this._dom.commandingSurfaceEl, { openCloseMachine: stateMachine });
// Tag the commanding surface's internals with ToolBar-specific class names so
// developers can style them without knowing about _CommandingSurface.
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-actionarea"), _Constants.ClassNames.actionAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowarea"), _Constants.ClassNames.overflowAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowbutton"), _Constants.ClassNames.overflowButtonCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-ellipsis"), _Constants.ClassNames.ellipsisCssClass);
this._isOpenedMode = _Constants.defaultOpened;
// While opened, the ToolBar is a light dismissible; describe how it closes on
// dismiss and how it takes focus when topmost in the dismiss stack.
this._dismissable = new _LightDismissService.LightDismissableElement({
element: this._dom.root,
tabIndex: this._dom.root.hasAttribute("tabIndex") ? this._dom.root.tabIndex : -1,
onLightDismiss: () => {
this.close();
},
onTakeFocus: (useSetActive) => {
this._dismissable.restoreFocus() ||
this._commandingSurface.takeFocus(useSetActive);
}
});
// Initialize public properties.
this.closedDisplayMode = _Constants.defaultClosedDisplayMode;
this.opened = this._isOpenedMode;
_Control.setOptions(this, options);
// Exit the Init state.
// Wait for the element to be in the DOM and for the commanding surface to
// finish initializing before the state machine accepts open/close requests.
_ElementUtilities._inDom(this.element).then(() => {
return this._commandingSurface.initialized;
}).then(() => {
stateMachine.exitInit();
this._writeProfilerMark("constructor,StopTM");
});
}
// Event-handler properties. The backing event plumbing (addEventListener etc.)
// is supplied by the _Events.createEventProperties mixin applied after the class.
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeopen" helpKeyword="WinJS.UI.ToolBar.onbeforeopen">
/// Occurs immediately before the control is opened. Is cancelable.
/// </field>
onbeforeopen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafteropen" helpKeyword="WinJS.UI.ToolBar.onafteropen">
/// Occurs immediately after the control is opened.
/// </field>
onafteropen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeclose" helpKeyword="WinJS.UI.ToolBar.onbeforeclose">
/// Occurs immediately before the control is closed. Is cancelable.
/// </field>
onbeforeclose: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafterclose" helpKeyword="WinJS.UI.ToolBar.onafterclose">
/// Occurs immediately after the control is closed.
/// </field>
onafterclose: (ev: CustomEvent) => void;
open(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.open">
/// <summary locid="WinJS.UI.ToolBar.open">
/// Opens the ToolBar
/// </summary>
/// </signature>
// Delegates to the commanding surface's state machine (events + animation).
this._commandingSurface.open();
}
close(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.close">
/// <summary locid="WinJS.UI.ToolBar.close">
/// Closes the ToolBar
/// </summary>
/// </signature>
// Delegates to the commanding surface's state machine (events + animation).
this._commandingSurface.close();
}
dispose() {
/// <signature helpKeyword="WinJS.UI.ToolBar.dispose">
/// <summary locid="WinJS.UI.ToolBar.dispose">
/// Disposes this ToolBar.
/// </summary>
/// </signature>
if (this._disposed) {
return;
}
this._disposed = true;
_LightDismissService.hidden(this._dismissable);
// Disposing the _commandingSurface will trigger dispose on its OpenCloseMachine and synchronously complete any animations that might have been running.
this._commandingSurface.dispose();
// If page navigation is happening, we don't want the ToolBar left behind in the body.
// Synchronoulsy close the ToolBar to force it out of the body and back into its parent element.
this._synchronousClose();
_ElementUtilities._inputPaneListener.removeEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
_Dispose.disposeSubTree(this.element);
}
forceLayout() {
/// <signature helpKeyword="WinJS.UI.ToolBar.forceLayout">
/// <summary locid="WinJS.UI.ToolBar.forceLayout">
/// Forces the ToolBar to update its layout. Use this function when the window did not change size, but the container of the ToolBar changed size.
/// </summary>
/// </signature>
this._commandingSurface.forceLayout();
}
getCommandById(id: string): _Command.ICommand {
/// <signature helpKeyword="WinJS.UI.ToolBar.getCommandById">
/// <summary locid="WinJS.UI.ToolBar.getCommandById">
/// Retrieves the command with the specified ID from this ToolBar.
/// If more than one command is found, this method returns the first command found.
/// </summary>
/// <param name="id" type="String" locid="WinJS.UI.ToolBar.getCommandById_p:id">Id of the command to return.</param>
/// <returns type="object" locid="WinJS.UI.ToolBar.getCommandById_returnValue">
/// The command found, or null if no command is found.
/// </returns>
/// </signature>
return this._commandingSurface.getCommandById(id);
}
showOnlyCommands(commands: Array<string|_Command.ICommand>): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.showOnlyCommands">
/// <summary locid="WinJS.UI.ToolBar.showOnlyCommands">
/// Show the specified commands, hiding all of the others in the ToolBar.
/// </summary>
/// <param name="commands" type="Array" locid="WinJS.UI.ToolBar.showOnlyCommands_p:commands">
/// An array of the commands to show. The array elements may be Command objects, or the string identifiers (IDs) of commands.
/// </param>
/// </signature>
return this._commandingSurface.showOnlyCommands(commands);
}
private _writeProfilerMark(text: string) {
_WriteProfilerMark("WinJS.UI.ToolBar:" + this._id + ":" + text);
}
private _initializeDom(root: HTMLElement): void {
this._writeProfilerMark("_intializeDom,info");
// Attaching JS control to DOM element
root["winControl"] = this;
this._id = root.id || _ElementUtilities._uniqueID(root);
_ElementUtilities.addClass(root, _Constants.ClassNames.controlCssClass);
_ElementUtilities.addClass(root, _Constants.ClassNames.disposableCssClass);
// Make sure we have an ARIA role
var role = root.getAttribute("role");
if (!role) {
root.setAttribute("role", "menubar");
}
var label = root.getAttribute("aria-label");
if (!label) {
root.setAttribute("aria-label", strings.ariaLabel);
}
// Create element for commandingSurface and reparent any declarative Commands.
// The CommandingSurface constructor will parse child elements as AppBarCommands.
var commandingSurfaceEl = document.createElement("DIV");
_ElementUtilities._reparentChildren(root, commandingSurfaceEl);
root.appendChild(commandingSurfaceEl);
// While the ToolBar is open, it will place itself in the <body> so it can become a light dismissible
// overlay. It leaves the placeHolder element behind as stand in at the ToolBar's original DOM location
// to avoid reflowing surrounding app content and create the illusion that the ToolBar hasn't moved along
// the x or y planes.
var placeHolder = _Global.document.createElement("DIV");
_ElementUtilities.addClass(placeHolder, _Constants.ClassNames.placeHolderCssClass);
// If the ToolBar's original HTML parent node is disposed while the ToolBar is open and repositioned as
// a temporary child of the <body>, make sure that calling dispose on the placeHolder element will trigger
// dispose on the ToolBar as well.
_Dispose.markDisposable(placeHolder, this.dispose.bind(this));
this._dom = {
root: root,
commandingSurfaceEl: commandingSurfaceEl,
placeHolder: placeHolder,
};
}
private _handleShowingKeyboard(event: { detail: { originalEvent: _WinRT.Windows.UI.ViewManagement.InputPaneVisibilityEventArgs } }) {
// Because the ToolBar takes up layout space and is not an overlay, it doesn't have the same expectation
// to move itself to get out of the way of a showing IHM. Instsead we just close the ToolBar to avoid
// scenarios where the ToolBar is occluded, but the click-eating-div is still present since it may seem
// strange to end users that an occluded ToolBar (out of sight, out of mind) is still eating their first
// click.
// Mitigation:
// Because (1) custom content in a ToolBar can only be included as a 'content' type command, because (2)
// the ToolBar only supports closedDisplayModes 'compact' and 'full', and because (3) 'content' type
// commands in the overflowarea use a separate contentflyout to display their contents:
// Interactable custom content contained within the ToolBar actionarea or overflowarea, will remain
// visible and interactable even when showing the IHM closes the ToolBar.
this.close();
}
private _synchronousOpen(): void {
this._isOpenedMode = true;
this._updateDomImpl();
}
private _synchronousClose(): void {
this._isOpenedMode = false;
this._updateDomImpl();
}
// State private to the _updateDomImpl family of method. No other methods should make use of it.
//
// Nothing has been rendered yet so these are all initialized to undefined. Because
// they are undefined, the first time _updateDomImpl is called, they will all be
// rendered.
private _updateDomImpl_renderedState = {
isOpenedMode: <boolean>undefined,
closedDisplayMode: <string>undefined,
prevInlineWidth: <string>undefined,
};
private _updateDomImpl(): void {
var rendered = this._updateDomImpl_renderedState;
if (rendered.isOpenedMode !== this._isOpenedMode) {
if (this._isOpenedMode) {
this._updateDomImpl_renderOpened();
} else {
this._updateDomImpl_renderClosed();
}
rendered.isOpenedMode = this._isOpenedMode;
}
if (rendered.closedDisplayMode !== this.closedDisplayMode) {
removeClass(this._dom.root, closedDisplayModeClassMap[rendered.closedDisplayMode]);
addClass(this._dom.root, closedDisplayModeClassMap[this.closedDisplayMode]);
rendered.closedDisplayMode = this.closedDisplayMode;
}
this._commandingSurface.updateDom();
}
private _getClosedHeight(): number {
if (this._cachedClosedHeight === null) {
var wasOpen = this._isOpenedMode;
if (this._isOpenedMode) {
this._synchronousClose();
}
this._cachedClosedHeight = this._commandingSurface.getBoundingRects().commandingSurface.height;
if (wasOpen) {
this._synchronousOpen();
}
}
return this._cachedClosedHeight;
}
private _updateDomImpl_renderOpened(): void {
// Measure closed state.
this._updateDomImpl_renderedState.prevInlineWidth = this._dom.root.style.width;
var closedBorderBox = this._dom.root.getBoundingClientRect();
var closedContentWidth = _ElementUtilities._getPreciseContentWidth(this._dom.root);
var closedContentHeight = _ElementUtilities._getPreciseContentHeight(this._dom.root);
var closedStyle = _ElementUtilities._getComputedStyle(this._dom.root);
var closedPaddingTop = _ElementUtilities._convertToPrecisePixels(closedStyle.paddingTop);
var closedBorderTop = _ElementUtilities._convertToPrecisePixels(closedStyle.borderTopWidth);
var closedMargins = _ElementUtilities._getPreciseMargins(this._dom.root);
var closedContentBoxTop = closedBorderBox.top + closedBorderTop + closedPaddingTop;
var closedContentBoxBottom = closedContentBoxTop + closedContentHeight;
// Size our placeHolder. Set height and width to match borderbox of the closed ToolBar.
// Copy ToolBar margins to the placeholder.
var placeHolder = this._dom.placeHolder;
var placeHolderStyle = placeHolder.style;
placeHolderStyle.width = closedBorderBox.width + "px";
placeHolderStyle.height = closedBorderBox.height + "px";
placeHolderStyle.marginTop = closedMargins.top + "px";
placeHolderStyle.marginRight = closedMargins.right + "px";
placeHolderStyle.marginBottom = closedMargins.bottom + "px";
placeHolderStyle.marginLeft = closedMargins.left + "px";
_ElementUtilities._maintainFocus(() => {
// Move ToolBar element to the body in preparation of becoming a light dismissible. Leave an equal sized placeHolder element
// at our original DOM location to avoid reflowing surrounding app content.
this._dom.root.parentElement.insertBefore(placeHolder, this._dom.root);
_Global.document.body.appendChild(this._dom.root);
// Position the ToolBar to completely cover the same region as the placeholder element.
this._dom.root.style.width = closedContentWidth + "px";
this._dom.root.style.left = closedBorderBox.left - closedMargins.left + "px";
// Determine which direction to expand the CommandingSurface elements when opened. The overflow area will be rendered at the corresponding edge of
// the ToolBar's content box, so we choose the direction that offers the most space between that edge and the corresponding edge of the viewport.
// This is to reduce the chance that the overflow area might clip through the edge of the viewport.
var topOfViewport = 0;
var bottomOfViewport = _Global.innerHeight;
var distanceFromTop = closedContentBoxTop - topOfViewport;
var distanceFromBottom = bottomOfViewport - closedContentBoxBottom;
if (distanceFromTop > distanceFromBottom) {
// CommandingSurface is going to expand updwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.top;
// Position the bottom edge of the ToolBar marginbox over the bottom edge of the placeholder marginbox.
this._dom.root.style.bottom = (bottomOfViewport - closedBorderBox.bottom) - closedMargins.bottom + "px";
} else {
// CommandingSurface is going to expand downwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.bottom;
// Position the top edge of the ToolBar marginbox over the top edge of the placeholder marginbox.
this._dom.root.style.top = (topOfViewport + closedBorderBox.top) - closedMargins.top + "px";
}
// Render opened state
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.openedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.closedClass);
});
this._commandingSurface.synchronousOpen();
_LightDismissService.shown(this._dismissable); // Call at the start of the open animation
}
private _updateDomImpl_renderClosed(): void {
_ElementUtilities._maintainFocus(() => {
if (this._dom.placeHolder.parentElement) {
// Restore our placement in the DOM
var placeHolder = this._dom.placeHolder;
placeHolder.parentElement.insertBefore(this._dom.root, placeHolder);
placeHolder.parentElement.removeChild(placeHolder);
}
// Render Closed
this._dom.root.style.top = "";
this._dom.root.style.right = "";
this._dom.root.style.bottom = "";
this._dom.root.style.left = "";
this._dom.root.style.width = this._updateDomImpl_renderedState.prevInlineWidth;
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.closedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.openedClass);
});
this._commandingSurface.synchronousClose();
_LightDismissService.hidden(this._dismissable); // Call after the close animation
}
}
_Base.Class.mix(ToolBar, _Events.createEventProperties(
_Constants.EventNames.beforeOpen,
_Constants.EventNames.afterOpen,
_Constants.EventNames.beforeClose,
_Constants.EventNames.afterClose));
// addEventListener, removeEventListener, dispatchEvent
_Base.Class.mix(ToolBar, _Control.DOMEventMixin);
| {
return this._dom.root;
} | identifier_body |
_ToolBar.ts | // Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License.txt in the project root for license information.
/// <reference path="../../Core.d.ts" />
import Animations = require("../../Animations");
import _Base = require("../../Core/_Base");
import _BaseUtils = require("../../Core/_BaseUtils");
import BindingList = require("../../BindingList");
import ControlProcessor = require("../../ControlProcessor");
import _Constants = require("../ToolBar/_Constants");
import _Command = require("../AppBar/_Command");
import _CommandingSurface = require("../CommandingSurface");
import _ICommandingSurface = require("../CommandingSurface/_CommandingSurface");
import _Control = require("../../Utilities/_Control");
import _Dispose = require("../../Utilities/_Dispose");
import _ElementUtilities = require("../../Utilities/_ElementUtilities");
import _ErrorFromName = require("../../Core/_ErrorFromName");
import _Events = require('../../Core/_Events');
import _Flyout = require("../../Controls/Flyout");
import _Global = require("../../Core/_Global");
import _Hoverable = require("../../Utilities/_Hoverable");
import _KeyboardBehavior = require("../../Utilities/_KeyboardBehavior");
import _LightDismissService = require('../../_LightDismissService');
import Menu = require("../../Controls/Menu");
import _MenuCommand = require("../Menu/_Command");
import Promise = require('../../Promise');
import _Resources = require("../../Core/_Resources");
import Scheduler = require("../../Scheduler");
import _OpenCloseMachine = require('../../Utilities/_OpenCloseMachine');
import _Signal = require('../../_Signal');
import _WinRT = require('../../Core/_WinRT');
import _WriteProfilerMark = require("../../Core/_WriteProfilerMark");
require(["require-style!less/styles-toolbar"]);
"use strict";
// The WinJS ToolBar is a specialized UI wrapper for the private _CommandingSurface UI component. The _CommandingSurface is responsible for rendering
// opened and closed states, knowing how to create the open and close animations, laying out commands, creating command hide/show animations and
// keyboard navigation across commands. The WinJS ToolBar is very similar to the WinJS AppBar, however the ToolBar is meant to be positioned in line
// with your app content whereas the AppBar is meant to overlay your app content.
//
// The responsibilities of the ToolBar include:
//
// - Seamlessly hosting the _CommandingSurface
// - From an end user perspective, there should be no visual distinction between where the ToolBar ends and the _CommandingSurface begins.
// - ToolBar wants to rely on the _CommandingSurface to do as much of the rendering as possible. The ToolBar relies on the _CommandingSurface to render its opened
// and closed states-- which defines the overall height of the ToolBar and CommandingSurface elements. The ToolBar has no policy or CSS styles regarding its own
// height and ToolBar takes advantage of the default behavior of its DIV element which is to always grow or shrink to match the height of its content.
// - From an end developer perspective, the _CommandingSurface should be abstracted as an implementation detail of the ToolBar as much as possible.
// - Developers should never have to interact with the CommandingSurface directly.The ToolBar exposes the majority of _CommandingSurface functionality through its
// own APIs
// - There are some HTML elements inside of the _CommandingSurface's DOM that a developer might like to style. After the _CommandingSurface has been instantiated
// and added to the ToolBar DOM, the ToolBar will inject its own "toolbar" specific class-names onto these elements to make them more discoverable to developers.
// - Example of developer styling guidelines https://msdn.microsoft.com/en-us/library/windows/apps/jj839733.asp
//
// - Open direction:
// - The ToolBar and its _CommandingSurface component can open upwards or downwards.Because there is no policy on where the ToolBar can be placed in an App, the ToolBar
// always wants to avoid opening in a direction that would cause any of its content to clip outside of the screen.
// - When the ToolBar is opening, it will always choose to expand in the direction(up or down) that currently has the most available space between the edge of the
// ToolBar element and the corresponding edge of the visual viewport.
// - This means that the a ToolBar near the bottom of the page will open upwards, but if the page is scrolled down such that the ToolBar is now near the top, the next
// time the ToolBar is opened it will open downwards.
//
// - Light dismiss
// - The ToolBar is a light dismissible when opened. This means that the ToolBar is closed thru a variety of cues such as tapping anywhere outside of it,
// pressing the escape key, and resizing the window.ToolBar relies on the _LightDismissService component for most of this functionality.
// The only pieces the ToolBar is responsible for are:
// - Describing what happens when a light dismiss is triggered on the ToolBar .
// - Describing how the ToolBar should take / restore focus when it becomes the topmost light dismissible in the light dismiss stack
// - Debugging Tip: Light dismiss can make debugging an opened ToolBar tricky.A good idea is to temporarily suspend the light dismiss cue that triggers when clicking
// outside of the current window.This can be achieved by executing the following code in the JavaScript console window: "WinJS.UI._LightDismissService._setDebug(true)"
//
// - Inline element when closed, overlay when opened:
// - The design of the toolbar called for it to be an control that developers can place inline with their other app content.When the ToolBar is closed it exists as a an
// element in your app, next to other app content and take up space in the flow of the document.
// - However, when the ToolBar opens, its vertical height will increase.Normally the change in height of an inline element will cause all of the other elements below the
// expanding element to move out of the way.Rather than push the rest of the app content down when opening, the design of the ToolBar called for it to overlay that content other content, while still taking up the same vertical space in the document as it did when closed.
// - The implementation of this feature is very complicated:
// - The only way one element can overlay another is to remove it from the flow of the document and give it a new CSS positioning like "absolute" or "fixed".
// - However, simply removing the ToolBar element from the document to make it an overlay, would leave behind a gap in the document that all the neighboring elements
// would try to fill by shifting over, leading to a jarring reflow of many elements whenever the ToolBar was opened.This was also undesirable
// - The final solution is as follows
// - Create a transparent placeholder element that is the exact same height and width as the closed ToolBar element.
// - Removing the ToolBar element from its place in the document while simultaneously inserting the placeholder element into the same spot the ToolBar element was
// just removed from.
// - Inserting the ToolBar element as a direct child of the body and giving it css position: fixed;
// We insert it directly into the body element because while opened, ToolBar is a Light dismissible overlay and is subject to the same stacking context pitfalls
// as any other light dismissible. https://github.com/winjs/winjs/wiki/Dismissables-and-Stacking-Contexts
// - Reposition the ToolBar element to be exactly overlaid on top of the placeholder element.
// - Render the ToolBar as opened, via the _CommandingSurface API, increasing the overall height of the ToolBar.
// - Closing the ToolBar is basically the same steps but in reverse.
// - One limitation to this implementation is that developers may not position the ToolBar element themselves directly via the CSS "position" or "float" properties.
// - This is because The ToolBar expects its element to be in the flow of the document when closed, and the placeholder element would not receive these same styles
// when inserted to replace the ToolBar element.
// - An easy workaround for developers is to wrap the ToolBar into another DIV element that they may style and position however they'd like.
//
// - Responding to the IHM:
// - If the ToolBar is opened when the IHM is shown, it will close itself.This is to avoid scenarios where the IHM totally occludes the opened ToolBar. If the ToolBar
// did not close itself, then the next mouse or touch input within the App wouldn't appear to do anything since it would just go to closing the light dismissible
// ToolBar anyway.
var strings = {
get ariaLabel() { return _Resources._getWinJSString("ui/toolbarAriaLabel").value; },
get overflowButtonAriaLabel() { return _Resources._getWinJSString("ui/toolbarOverflowButtonAriaLabel").value; },
get mustContainCommands() { return "The toolbar can only contain WinJS.UI.Command or WinJS.UI.AppBarCommand controls"; },
get duplicateConstruction() { return "Invalid argument: Controls may only be instantiated one time for each DOM element"; }
};
var ClosedDisplayMode = {
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.compact" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.compact">
/// When the ToolBar is closed, the height of the ToolBar is reduced such that button commands are still visible, but their labels are hidden.
/// </field>
compact: "compact",
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.full" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.full">
/// When the ToolBar is closed, the height of the ToolBar is always sized to content.
/// </field>
full: "full",
};
var closedDisplayModeClassMap = {};
closedDisplayModeClassMap[ClosedDisplayMode.compact] = _Constants.ClassNames.compactClass;
closedDisplayModeClassMap[ClosedDisplayMode.full] = _Constants.ClassNames.fullClass;
// Versions of add/removeClass that are no ops when called with falsy class names.
function addClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.addClass(element, className);
}
function removeClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.removeClass(element, className);
}
/// <field>
/// <summary locid="WinJS.UI.ToolBar">
/// Displays ICommands within the flow of the app. Use the ToolBar around other statically positioned app content.
/// </summary>
/// </field>
/// <icon src="ui_winjs.ui.toolbar.12x12.png" width="12" height="12" />
/// <icon src="ui_winjs.ui.toolbar.16x16.png" width="16" height="16" />
/// <htmlSnippet supportsContent="true"><![CDATA[<div data-win-control="WinJS.UI.ToolBar">
/// <button data-win-control="WinJS.UI.Command" data-win-options="{id:'',label:'example',icon:'back',type:'button',onclick:null,section:'primary'}"></button>
/// </div>]]></htmlSnippet>
/// <part name="toolbar" class="win-toolbar" locid="WinJS.UI.ToolBar_part:toolbar">The entire ToolBar control.</part>
/// <part name="toolbar-overflowbutton" class="win-toolbar-overflowbutton" locid="WinJS.UI.ToolBar_part:ToolBar-overflowbutton">The toolbar overflow button.</part>
/// <part name="toolbar-overflowarea" class="win-toolbar-overflowarea" locid="WinJS.UI.ToolBar_part:ToolBar-overflowarea">The container for toolbar commands that overflow.</part>
/// <resource type="javascript" src="//$(TARGET_DESTINATION)/js/WinJS.js" shared="true" />
/// <resource type="css" src="//$(TARGET_DESTINATION)/css/ui-dark.css" shared="true" />
export class ToolBar {
private _id: string;
private _disposed: boolean;
private _commandingSurface: _ICommandingSurface._CommandingSurface;
private _isOpenedMode: boolean;
private _handleShowingKeyboardBound: (ev: any) => void;
private _dismissable: _LightDismissService.LightDismissableElement;
private _cachedClosedHeight: number;
private _dom: {
root: HTMLElement;
commandingSurfaceEl: HTMLElement;
placeHolder: HTMLElement;
}
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode">
/// Display options for the actionarea when the ToolBar is closed.
/// </field>
static ClosedDisplayMode = ClosedDisplayMode;
static supportedForProcessing: boolean = true;
/// <field type="HTMLElement" domElement="true" hidden="true" locid="WinJS.UI.ToolBar.element" helpKeyword="WinJS.UI.ToolBar.element">
/// Gets the DOM element that hosts the ToolBar.
/// </field>
get element() {
return this._dom.root;
}
/// <field type="WinJS.Binding.List" locid="WinJS.UI.ToolBar.data" helpKeyword="WinJS.UI.ToolBar.data">
/// Gets or sets the Binding List of WinJS.UI.Command for the ToolBar.
/// </field>
get data() {
return this._commandingSurface.data;
}
set data(value: BindingList.List<_Command.ICommand>) {
this._commandingSurface.data = value;
}
/// <field type="String" locid="WinJS.UI.ToolBar.closedDisplayMode" helpKeyword="WinJS.UI.ToolBar.closedDisplayMode">
/// Gets or sets the closedDisplayMode for the ToolBar. Values are "compact" and "full".
/// </field>
get closedDisplayMode() {
return this._commandingSurface.closedDisplayMode;
}
set closedDisplayMode(value: string) {
if (ClosedDisplayMode[value]) {
this._commandingSurface.closedDisplayMode = value;
this._cachedClosedHeight = null;
}
}
/// <field type="Boolean" hidden="true" locid="WinJS.UI.ToolBar.opened" helpKeyword="WinJS.UI.ToolBar.opened">
/// Gets or sets whether the ToolBar is currently opened.
/// </field>
get opened(): boolean {
return this._commandingSurface.opened;
}
set opened(value: boolean) {
this._commandingSurface.opened = value;
}
constructor(element?: HTMLElement, options: any = {}) {
/// <signature helpKeyword="WinJS.UI.ToolBar.ToolBar">
/// <summary locid="WinJS.UI.ToolBar.constructor">
/// Creates a new ToolBar control.
/// </summary>
/// <param name="element" type="HTMLElement" domElement="true" locid="WinJS.UI.ToolBar.constructor_p:element">
/// The DOM element that will host the control.
/// </param>
/// <param name="options" type="Object" locid="WinJS.UI.ToolBar.constructor_p:options">
/// The set of properties and values to apply to the new ToolBar control.
/// </param>
/// <returns type="WinJS.UI.ToolBar" locid="WinJS.UI.ToolBar.constructor_returnValue">
/// The new ToolBar control.
/// </returns>
/// </signature>
this._writeProfilerMark("constructor,StartTM");
// Check to make sure we weren't duplicated
if (element && element["winControl"]) {
throw new _ErrorFromName("WinJS.UI.ToolBar.DuplicateConstruction", strings.duplicateConstruction);
}
this._initializeDom(element || _Global.document.createElement("div"));
var stateMachine = new _OpenCloseMachine.OpenCloseMachine({
eventElement: this.element,
onOpen: () => {
var openAnimation = this._commandingSurface.createOpenAnimation(this._getClosedHeight());
this._synchronousOpen();
return openAnimation.execute();
},
onClose: () => {
var closeAnimation = this._commandingSurface.createCloseAnimation(this._getClosedHeight());
return closeAnimation.execute().then(() => {
this._synchronousClose();
});
},
onUpdateDom: () => {
this._updateDomImpl();
},
onUpdateDomWithIsOpened: (isOpened: boolean) => {
this._isOpenedMode = isOpened;
this._updateDomImpl();
}
});
// Events
this._handleShowingKeyboardBound = this._handleShowingKeyboard.bind(this);
_ElementUtilities._inputPaneListener.addEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
// Initialize private state.
this._disposed = false;
this._cachedClosedHeight = null;
this._commandingSurface = new _CommandingSurface._CommandingSurface(this._dom.commandingSurfaceEl, { openCloseMachine: stateMachine });
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-actionarea"), _Constants.ClassNames.actionAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowarea"), _Constants.ClassNames.overflowAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowbutton"), _Constants.ClassNames.overflowButtonCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-ellipsis"), _Constants.ClassNames.ellipsisCssClass);
this._isOpenedMode = _Constants.defaultOpened;
this._dismissable = new _LightDismissService.LightDismissableElement({
element: this._dom.root,
tabIndex: this._dom.root.hasAttribute("tabIndex") ? this._dom.root.tabIndex : -1,
onLightDismiss: () => {
this.close();
},
onTakeFocus: (useSetActive) => {
this._dismissable.restoreFocus() ||
this._commandingSurface.takeFocus(useSetActive);
}
});
// Initialize public properties.
this.closedDisplayMode = _Constants.defaultClosedDisplayMode;
this.opened = this._isOpenedMode;
_Control.setOptions(this, options);
// Exit the Init state.
_ElementUtilities._inDom(this.element).then(() => {
return this._commandingSurface.initialized;
}).then(() => {
stateMachine.exitInit();
this._writeProfilerMark("constructor,StopTM");
});
}
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeopen" helpKeyword="WinJS.UI.ToolBar.onbeforeopen">
/// Occurs immediately before the control is opened. Is cancelable.
/// </field>
onbeforeopen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafteropen" helpKeyword="WinJS.UI.ToolBar.onafteropen">
/// Occurs immediately after the control is opened.
/// </field>
onafteropen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeclose" helpKeyword="WinJS.UI.ToolBar.onbeforeclose">
/// Occurs immediately before the control is closed. Is cancelable.
/// </field>
onbeforeclose: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafterclose" helpKeyword="WinJS.UI.ToolBar.onafterclose">
/// Occurs immediately after the control is closed.
/// </field>
onafterclose: (ev: CustomEvent) => void;
open(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.open">
/// <summary locid="WinJS.UI.ToolBar.open">
/// Opens the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.open();
}
close(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.close">
/// <summary locid="WinJS.UI.ToolBar.close">
/// Closes the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.close();
}
dispose() {
/// <signature helpKeyword="WinJS.UI.ToolBar.dispose">
/// <summary locid="WinJS.UI.ToolBar.dispose">
/// Disposes this ToolBar.
/// </summary>
/// </signature>
if (this._disposed) {
return;
}
this._disposed = true;
_LightDismissService.hidden(this._dismissable);
// Disposing the _commandingSurface will trigger dispose on its OpenCloseMachine and synchronously complete any animations that might have been running.
this._commandingSurface.dispose();
// If page navigation is happening, we don't want the ToolBar left behind in the body.
// Synchronoulsy close the ToolBar to force it out of the body and back into its parent element.
this._synchronousClose();
_ElementUtilities._inputPaneListener.removeEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
_Dispose.disposeSubTree(this.element);
}
forceLayout() {
/// <signature helpKeyword="WinJS.UI.ToolBar.forceLayout">
/// <summary locid="WinJS.UI.ToolBar.forceLayout">
/// Forces the ToolBar to update its layout. Use this function when the window did not change size, but the container of the ToolBar changed size.
/// </summary>
/// </signature>
this._commandingSurface.forceLayout();
}
getCommandById(id: string): _Command.ICommand {
/// <signature helpKeyword="WinJS.UI.ToolBar.getCommandById">
/// <summary locid="WinJS.UI.ToolBar.getCommandById">
/// Retrieves the command with the specified ID from this ToolBar.
/// If more than one command is found, this method returns the first command found.
/// </summary>
/// <param name="id" type="String" locid="WinJS.UI.ToolBar.getCommandById_p:id">Id of the command to return.</param>
/// <returns type="object" locid="WinJS.UI.ToolBar.getCommandById_returnValue">
/// The command found, or null if no command is found.
/// </returns>
/// </signature>
return this._commandingSurface.getCommandById(id);
}
showOnlyCommands(commands: Array<string|_Command.ICommand>): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.showOnlyCommands">
/// <summary locid="WinJS.UI.ToolBar.showOnlyCommands">
/// Show the specified commands, hiding all of the others in the ToolBar.
/// </summary>
/// <param name="commands" type="Array" locid="WinJS.UI.ToolBar.showOnlyCommands_p:commands">
/// An array of the commands to show. The array elements may be Command objects, or the string identifiers (IDs) of commands.
/// </param>
/// </signature>
return this._commandingSurface.showOnlyCommands(commands);
}
private _writeProfilerMark(text: string) {
_WriteProfilerMark("WinJS.UI.ToolBar:" + this._id + ":" + text);
}
private _initializeDom(root: HTMLElement): void {
this._writeProfilerMark("_intializeDom,info");
// Attaching JS control to DOM element
root["winControl"] = this;
this._id = root.id || _ElementUtilities._uniqueID(root);
_ElementUtilities.addClass(root, _Constants.ClassNames.controlCssClass);
_ElementUtilities.addClass(root, _Constants.ClassNames.disposableCssClass);
// Make sure we have an ARIA role
var role = root.getAttribute("role");
if (!role) { | if (!label) {
root.setAttribute("aria-label", strings.ariaLabel);
}
// Create element for commandingSurface and reparent any declarative Commands.
// The CommandingSurface constructor will parse child elements as AppBarCommands.
var commandingSurfaceEl = document.createElement("DIV");
_ElementUtilities._reparentChildren(root, commandingSurfaceEl);
root.appendChild(commandingSurfaceEl);
// While the ToolBar is open, it will place itself in the <body> so it can become a light dismissible
// overlay. It leaves the placeHolder element behind as stand in at the ToolBar's original DOM location
// to avoid reflowing surrounding app content and create the illusion that the ToolBar hasn't moved along
// the x or y planes.
var placeHolder = _Global.document.createElement("DIV");
_ElementUtilities.addClass(placeHolder, _Constants.ClassNames.placeHolderCssClass);
// If the ToolBar's original HTML parent node is disposed while the ToolBar is open and repositioned as
// a temporary child of the <body>, make sure that calling dispose on the placeHolder element will trigger
// dispose on the ToolBar as well.
_Dispose.markDisposable(placeHolder, this.dispose.bind(this));
this._dom = {
root: root,
commandingSurfaceEl: commandingSurfaceEl,
placeHolder: placeHolder,
};
}
private _handleShowingKeyboard(event: { detail: { originalEvent: _WinRT.Windows.UI.ViewManagement.InputPaneVisibilityEventArgs } }) {
// Because the ToolBar takes up layout space and is not an overlay, it doesn't have the same expectation
// to move itself to get out of the way of a showing IHM. Instsead we just close the ToolBar to avoid
// scenarios where the ToolBar is occluded, but the click-eating-div is still present since it may seem
// strange to end users that an occluded ToolBar (out of sight, out of mind) is still eating their first
// click.
// Mitigation:
// Because (1) custom content in a ToolBar can only be included as a 'content' type command, because (2)
// the ToolBar only supports closedDisplayModes 'compact' and 'full', and because (3) 'content' type
// commands in the overflowarea use a separate contentflyout to display their contents:
// Interactable custom content contained within the ToolBar actionarea or overflowarea, will remain
// visible and interactable even when showing the IHM closes the ToolBar.
this.close();
}
private _synchronousOpen(): void {
this._isOpenedMode = true;
this._updateDomImpl();
}
private _synchronousClose(): void {
this._isOpenedMode = false;
this._updateDomImpl();
}
// State private to the _updateDomImpl family of method. No other methods should make use of it.
//
// Nothing has been rendered yet so these are all initialized to undefined. Because
// they are undefined, the first time _updateDomImpl is called, they will all be
// rendered.
private _updateDomImpl_renderedState = {
isOpenedMode: <boolean>undefined,
closedDisplayMode: <string>undefined,
prevInlineWidth: <string>undefined,
};
private _updateDomImpl(): void {
var rendered = this._updateDomImpl_renderedState;
if (rendered.isOpenedMode !== this._isOpenedMode) {
if (this._isOpenedMode) {
this._updateDomImpl_renderOpened();
} else {
this._updateDomImpl_renderClosed();
}
rendered.isOpenedMode = this._isOpenedMode;
}
if (rendered.closedDisplayMode !== this.closedDisplayMode) {
removeClass(this._dom.root, closedDisplayModeClassMap[rendered.closedDisplayMode]);
addClass(this._dom.root, closedDisplayModeClassMap[this.closedDisplayMode]);
rendered.closedDisplayMode = this.closedDisplayMode;
}
this._commandingSurface.updateDom();
}
private _getClosedHeight(): number {
if (this._cachedClosedHeight === null) {
var wasOpen = this._isOpenedMode;
if (this._isOpenedMode) {
this._synchronousClose();
}
this._cachedClosedHeight = this._commandingSurface.getBoundingRects().commandingSurface.height;
if (wasOpen) {
this._synchronousOpen();
}
}
return this._cachedClosedHeight;
}
private _updateDomImpl_renderOpened(): void {
// Measure closed state.
this._updateDomImpl_renderedState.prevInlineWidth = this._dom.root.style.width;
var closedBorderBox = this._dom.root.getBoundingClientRect();
var closedContentWidth = _ElementUtilities._getPreciseContentWidth(this._dom.root);
var closedContentHeight = _ElementUtilities._getPreciseContentHeight(this._dom.root);
var closedStyle = _ElementUtilities._getComputedStyle(this._dom.root);
var closedPaddingTop = _ElementUtilities._convertToPrecisePixels(closedStyle.paddingTop);
var closedBorderTop = _ElementUtilities._convertToPrecisePixels(closedStyle.borderTopWidth);
var closedMargins = _ElementUtilities._getPreciseMargins(this._dom.root);
var closedContentBoxTop = closedBorderBox.top + closedBorderTop + closedPaddingTop;
var closedContentBoxBottom = closedContentBoxTop + closedContentHeight;
// Size our placeHolder. Set height and width to match borderbox of the closed ToolBar.
// Copy ToolBar margins to the placeholder.
var placeHolder = this._dom.placeHolder;
var placeHolderStyle = placeHolder.style;
placeHolderStyle.width = closedBorderBox.width + "px";
placeHolderStyle.height = closedBorderBox.height + "px";
placeHolderStyle.marginTop = closedMargins.top + "px";
placeHolderStyle.marginRight = closedMargins.right + "px";
placeHolderStyle.marginBottom = closedMargins.bottom + "px";
placeHolderStyle.marginLeft = closedMargins.left + "px";
_ElementUtilities._maintainFocus(() => {
// Move ToolBar element to the body in preparation of becoming a light dismissible. Leave an equal sized placeHolder element
// at our original DOM location to avoid reflowing surrounding app content.
this._dom.root.parentElement.insertBefore(placeHolder, this._dom.root);
_Global.document.body.appendChild(this._dom.root);
// Position the ToolBar to completely cover the same region as the placeholder element.
this._dom.root.style.width = closedContentWidth + "px";
this._dom.root.style.left = closedBorderBox.left - closedMargins.left + "px";
// Determine which direction to expand the CommandingSurface elements when opened. The overflow area will be rendered at the corresponding edge of
// the ToolBar's content box, so we choose the direction that offers the most space between that edge and the corresponding edge of the viewport.
// This is to reduce the chance that the overflow area might clip through the edge of the viewport.
var topOfViewport = 0;
var bottomOfViewport = _Global.innerHeight;
var distanceFromTop = closedContentBoxTop - topOfViewport;
var distanceFromBottom = bottomOfViewport - closedContentBoxBottom;
if (distanceFromTop > distanceFromBottom) {
// CommandingSurface is going to expand updwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.top;
// Position the bottom edge of the ToolBar marginbox over the bottom edge of the placeholder marginbox.
this._dom.root.style.bottom = (bottomOfViewport - closedBorderBox.bottom) - closedMargins.bottom + "px";
} else {
// CommandingSurface is going to expand downwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.bottom;
// Position the top edge of the ToolBar marginbox over the top edge of the placeholder marginbox.
this._dom.root.style.top = (topOfViewport + closedBorderBox.top) - closedMargins.top + "px";
}
// Render opened state
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.openedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.closedClass);
});
this._commandingSurface.synchronousOpen();
_LightDismissService.shown(this._dismissable); // Call at the start of the open animation
}
private _updateDomImpl_renderClosed(): void {
_ElementUtilities._maintainFocus(() => {
if (this._dom.placeHolder.parentElement) {
// Restore our placement in the DOM
var placeHolder = this._dom.placeHolder;
placeHolder.parentElement.insertBefore(this._dom.root, placeHolder);
placeHolder.parentElement.removeChild(placeHolder);
}
// Render Closed
this._dom.root.style.top = "";
this._dom.root.style.right = "";
this._dom.root.style.bottom = "";
this._dom.root.style.left = "";
this._dom.root.style.width = this._updateDomImpl_renderedState.prevInlineWidth;
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.closedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.openedClass);
});
this._commandingSurface.synchronousClose();
_LightDismissService.hidden(this._dismissable); // Call after the close animation
}
}
_Base.Class.mix(ToolBar, _Events.createEventProperties(
_Constants.EventNames.beforeOpen,
_Constants.EventNames.afterOpen,
_Constants.EventNames.beforeClose,
_Constants.EventNames.afterClose));
// addEventListener, removeEventListener, dispatchEvent
_Base.Class.mix(ToolBar, _Control.DOMEventMixin); | root.setAttribute("role", "menubar");
}
var label = root.getAttribute("aria-label"); | random_line_split |
_ToolBar.ts | // Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License.txt in the project root for license information.
/// <reference path="../../Core.d.ts" />
import Animations = require("../../Animations");
import _Base = require("../../Core/_Base");
import _BaseUtils = require("../../Core/_BaseUtils");
import BindingList = require("../../BindingList");
import ControlProcessor = require("../../ControlProcessor");
import _Constants = require("../ToolBar/_Constants");
import _Command = require("../AppBar/_Command");
import _CommandingSurface = require("../CommandingSurface");
import _ICommandingSurface = require("../CommandingSurface/_CommandingSurface");
import _Control = require("../../Utilities/_Control");
import _Dispose = require("../../Utilities/_Dispose");
import _ElementUtilities = require("../../Utilities/_ElementUtilities");
import _ErrorFromName = require("../../Core/_ErrorFromName");
import _Events = require('../../Core/_Events');
import _Flyout = require("../../Controls/Flyout");
import _Global = require("../../Core/_Global");
import _Hoverable = require("../../Utilities/_Hoverable");
import _KeyboardBehavior = require("../../Utilities/_KeyboardBehavior");
import _LightDismissService = require('../../_LightDismissService');
import Menu = require("../../Controls/Menu");
import _MenuCommand = require("../Menu/_Command");
import Promise = require('../../Promise');
import _Resources = require("../../Core/_Resources");
import Scheduler = require("../../Scheduler");
import _OpenCloseMachine = require('../../Utilities/_OpenCloseMachine');
import _Signal = require('../../_Signal');
import _WinRT = require('../../Core/_WinRT');
import _WriteProfilerMark = require("../../Core/_WriteProfilerMark");
require(["require-style!less/styles-toolbar"]);
"use strict";
// The WinJS ToolBar is a specialized UI wrapper for the private _CommandingSurface UI component. The _CommandingSurface is responsible for rendering
// opened and closed states, knowing how to create the open and close animations, laying out commands, creating command hide/show animations and
// keyboard navigation across commands. The WinJS ToolBar is very similar to the WinJS AppBar, however the ToolBar is meant to be positioned in line
// with your app content whereas the AppBar is meant to overlay your app content.
//
// The responsibilities of the ToolBar include:
//
// - Seamlessly hosting the _CommandingSurface
// - From an end user perspective, there should be no visual distinction between where the ToolBar ends and the _CommandingSurface begins.
// - ToolBar wants to rely on the _CommandingSurface to do as much of the rendering as possible. The ToolBar relies on the _CommandingSurface to render its opened
// and closed states-- which defines the overall height of the ToolBar and CommandingSurface elements. The ToolBar has no policy or CSS styles regarding its own
// height and ToolBar takes advantage of the default behavior of its DIV element which is to always grow or shrink to match the height of its content.
// - From an end developer perspective, the _CommandingSurface should be abstracted as an implementation detail of the ToolBar as much as possible.
// - Developers should never have to interact with the CommandingSurface directly.The ToolBar exposes the majority of _CommandingSurface functionality through its
// own APIs
// - There are some HTML elements inside of the _CommandingSurface's DOM that a developer might like to style. After the _CommandingSurface has been instantiated
// and added to the ToolBar DOM, the ToolBar will inject its own "toolbar" specific class-names onto these elements to make them more discoverable to developers.
// - Example of developer styling guidelines https://msdn.microsoft.com/en-us/library/windows/apps/jj839733.asp
//
// - Open direction:
// - The ToolBar and its _CommandingSurface component can open upwards or downwards.Because there is no policy on where the ToolBar can be placed in an App, the ToolBar
// always wants to avoid opening in a direction that would cause any of its content to clip outside of the screen.
// - When the ToolBar is opening, it will always choose to expand in the direction(up or down) that currently has the most available space between the edge of the
// ToolBar element and the corresponding edge of the visual viewport.
// - This means that the a ToolBar near the bottom of the page will open upwards, but if the page is scrolled down such that the ToolBar is now near the top, the next
// time the ToolBar is opened it will open downwards.
//
// - Light dismiss
// - The ToolBar is a light dismissible when opened. This means that the ToolBar is closed thru a variety of cues such as tapping anywhere outside of it,
// pressing the escape key, and resizing the window.ToolBar relies on the _LightDismissService component for most of this functionality.
// The only pieces the ToolBar is responsible for are:
// - Describing what happens when a light dismiss is triggered on the ToolBar .
// - Describing how the ToolBar should take / restore focus when it becomes the topmost light dismissible in the light dismiss stack
// - Debugging Tip: Light dismiss can make debugging an opened ToolBar tricky.A good idea is to temporarily suspend the light dismiss cue that triggers when clicking
// outside of the current window.This can be achieved by executing the following code in the JavaScript console window: "WinJS.UI._LightDismissService._setDebug(true)"
//
// - Inline element when closed, overlay when opened:
// - The design of the toolbar called for it to be an control that developers can place inline with their other app content.When the ToolBar is closed it exists as a an
// element in your app, next to other app content and take up space in the flow of the document.
// - However, when the ToolBar opens, its vertical height will increase.Normally the change in height of an inline element will cause all of the other elements below the
// expanding element to move out of the way.Rather than push the rest of the app content down when opening, the design of the ToolBar called for it to overlay that content other content, while still taking up the same vertical space in the document as it did when closed.
// - The implementation of this feature is very complicated:
// - The only way one element can overlay another is to remove it from the flow of the document and give it a new CSS positioning like "absolute" or "fixed".
// - However, simply removing the ToolBar element from the document to make it an overlay, would leave behind a gap in the document that all the neighboring elements
// would try to fill by shifting over, leading to a jarring reflow of many elements whenever the ToolBar was opened.This was also undesirable
// - The final solution is as follows
// - Create a transparent placeholder element that is the exact same height and width as the closed ToolBar element.
// - Removing the ToolBar element from its place in the document while simultaneously inserting the placeholder element into the same spot the ToolBar element was
// just removed from.
// - Inserting the ToolBar element as a direct child of the body and giving it css position: fixed;
// We insert it directly into the body element because while opened, ToolBar is a Light dismissible overlay and is subject to the same stacking context pitfalls
// as any other light dismissible. https://github.com/winjs/winjs/wiki/Dismissables-and-Stacking-Contexts
// - Reposition the ToolBar element to be exactly overlaid on top of the placeholder element.
// - Render the ToolBar as opened, via the _CommandingSurface API, increasing the overall height of the ToolBar.
// - Closing the ToolBar is basically the same steps but in reverse.
// - One limitation to this implementation is that developers may not position the ToolBar element themselves directly via the CSS "position" or "float" properties.
// - This is because The ToolBar expects its element to be in the flow of the document when closed, and the placeholder element would not receive these same styles
// when inserted to replace the ToolBar element.
// - An easy workaround for developers is to wrap the ToolBar into another DIV element that they may style and position however they'd like.
//
// - Responding to the IHM:
// - If the ToolBar is opened when the IHM is shown, it will close itself.This is to avoid scenarios where the IHM totally occludes the opened ToolBar. If the ToolBar
// did not close itself, then the next mouse or touch input within the App wouldn't appear to do anything since it would just go to closing the light dismissible
// ToolBar anyway.
var strings = {
get ariaLabel() { return _Resources._getWinJSString("ui/toolbarAriaLabel").value; },
get overflowButtonAriaLabel() { return _Resources._getWinJSString("ui/toolbarOverflowButtonAriaLabel").value; },
get mustContainCommands() { return "The toolbar can only contain WinJS.UI.Command or WinJS.UI.AppBarCommand controls"; },
get duplicateConstruction() { return "Invalid argument: Controls may only be instantiated one time for each DOM element"; }
};
var ClosedDisplayMode = {
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.compact" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.compact">
/// When the ToolBar is closed, the height of the ToolBar is reduced such that button commands are still visible, but their labels are hidden.
/// </field>
compact: "compact",
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.full" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.full">
/// When the ToolBar is closed, the height of the ToolBar is always sized to content.
/// </field>
full: "full",
};
var closedDisplayModeClassMap = {};
closedDisplayModeClassMap[ClosedDisplayMode.compact] = _Constants.ClassNames.compactClass;
closedDisplayModeClassMap[ClosedDisplayMode.full] = _Constants.ClassNames.fullClass;
// Versions of add/removeClass that are no ops when called with falsy class names.
function addClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.addClass(element, className);
}
function removeClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.removeClass(element, className);
}
/// <field>
/// <summary locid="WinJS.UI.ToolBar">
/// Displays ICommands within the flow of the app. Use the ToolBar around other statically positioned app content.
/// </summary>
/// </field>
/// <icon src="ui_winjs.ui.toolbar.12x12.png" width="12" height="12" />
/// <icon src="ui_winjs.ui.toolbar.16x16.png" width="16" height="16" />
/// <htmlSnippet supportsContent="true"><![CDATA[<div data-win-control="WinJS.UI.ToolBar">
/// <button data-win-control="WinJS.UI.Command" data-win-options="{id:'',label:'example',icon:'back',type:'button',onclick:null,section:'primary'}"></button>
/// </div>]]></htmlSnippet>
/// <part name="toolbar" class="win-toolbar" locid="WinJS.UI.ToolBar_part:toolbar">The entire ToolBar control.</part>
/// <part name="toolbar-overflowbutton" class="win-toolbar-overflowbutton" locid="WinJS.UI.ToolBar_part:ToolBar-overflowbutton">The toolbar overflow button.</part>
/// <part name="toolbar-overflowarea" class="win-toolbar-overflowarea" locid="WinJS.UI.ToolBar_part:ToolBar-overflowarea">The container for toolbar commands that overflow.</part>
/// <resource type="javascript" src="//$(TARGET_DESTINATION)/js/WinJS.js" shared="true" />
/// <resource type="css" src="//$(TARGET_DESTINATION)/css/ui-dark.css" shared="true" />
export class ToolBar {
private _id: string;
private _disposed: boolean;
private _commandingSurface: _ICommandingSurface._CommandingSurface;
private _isOpenedMode: boolean;
private _handleShowingKeyboardBound: (ev: any) => void;
private _dismissable: _LightDismissService.LightDismissableElement;
private _cachedClosedHeight: number;
private _dom: {
root: HTMLElement;
commandingSurfaceEl: HTMLElement;
placeHolder: HTMLElement;
}
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode">
/// Display options for the actionarea when the ToolBar is closed.
/// </field>
static ClosedDisplayMode = ClosedDisplayMode;
static supportedForProcessing: boolean = true;
/// <field type="HTMLElement" domElement="true" hidden="true" locid="WinJS.UI.ToolBar.element" helpKeyword="WinJS.UI.ToolBar.element">
/// Gets the DOM element that hosts the ToolBar.
/// </field>
get element() {
return this._dom.root;
}
/// <field type="WinJS.Binding.List" locid="WinJS.UI.ToolBar.data" helpKeyword="WinJS.UI.ToolBar.data">
/// Gets or sets the Binding List of WinJS.UI.Command for the ToolBar.
/// </field>
get data() {
return this._commandingSurface.data;
}
set data(value: BindingList.List<_Command.ICommand>) {
this._commandingSurface.data = value;
}
/// <field type="String" locid="WinJS.UI.ToolBar.closedDisplayMode" helpKeyword="WinJS.UI.ToolBar.closedDisplayMode">
/// Gets or sets the closedDisplayMode for the ToolBar. Values are "compact" and "full".
/// </field>
get closedDisplayMode() {
return this._commandingSurface.closedDisplayMode;
}
set closedDisplayMode(value: string) {
if (ClosedDisplayMode[value]) {
this._commandingSurface.closedDisplayMode = value;
this._cachedClosedHeight = null;
}
}
/// <field type="Boolean" hidden="true" locid="WinJS.UI.ToolBar.opened" helpKeyword="WinJS.UI.ToolBar.opened">
/// Gets or sets whether the ToolBar is currently opened.
/// </field>
get opened(): boolean {
return this._commandingSurface.opened;
}
set opened(value: boolean) {
this._commandingSurface.opened = value;
}
constructor(element?: HTMLElement, options: any = {}) {
/// <signature helpKeyword="WinJS.UI.ToolBar.ToolBar">
/// <summary locid="WinJS.UI.ToolBar.constructor">
/// Creates a new ToolBar control.
/// </summary>
/// <param name="element" type="HTMLElement" domElement="true" locid="WinJS.UI.ToolBar.constructor_p:element">
/// The DOM element that will host the control.
/// </param>
/// <param name="options" type="Object" locid="WinJS.UI.ToolBar.constructor_p:options">
/// The set of properties and values to apply to the new ToolBar control.
/// </param>
/// <returns type="WinJS.UI.ToolBar" locid="WinJS.UI.ToolBar.constructor_returnValue">
/// The new ToolBar control.
/// </returns>
/// </signature>
this._writeProfilerMark("constructor,StartTM");
// Check to make sure we weren't duplicated
if (element && element["winControl"]) {
throw new _ErrorFromName("WinJS.UI.ToolBar.DuplicateConstruction", strings.duplicateConstruction);
}
this._initializeDom(element || _Global.document.createElement("div"));
var stateMachine = new _OpenCloseMachine.OpenCloseMachine({
eventElement: this.element,
onOpen: () => {
var openAnimation = this._commandingSurface.createOpenAnimation(this._getClosedHeight());
this._synchronousOpen();
return openAnimation.execute();
},
onClose: () => {
var closeAnimation = this._commandingSurface.createCloseAnimation(this._getClosedHeight());
return closeAnimation.execute().then(() => {
this._synchronousClose();
});
},
onUpdateDom: () => {
this._updateDomImpl();
},
onUpdateDomWithIsOpened: (isOpened: boolean) => {
this._isOpenedMode = isOpened;
this._updateDomImpl();
}
});
// Events
this._handleShowingKeyboardBound = this._handleShowingKeyboard.bind(this);
_ElementUtilities._inputPaneListener.addEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
// Initialize private state.
this._disposed = false;
this._cachedClosedHeight = null;
this._commandingSurface = new _CommandingSurface._CommandingSurface(this._dom.commandingSurfaceEl, { openCloseMachine: stateMachine });
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-actionarea"), _Constants.ClassNames.actionAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowarea"), _Constants.ClassNames.overflowAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowbutton"), _Constants.ClassNames.overflowButtonCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-ellipsis"), _Constants.ClassNames.ellipsisCssClass);
this._isOpenedMode = _Constants.defaultOpened;
this._dismissable = new _LightDismissService.LightDismissableElement({
element: this._dom.root,
tabIndex: this._dom.root.hasAttribute("tabIndex") ? this._dom.root.tabIndex : -1,
onLightDismiss: () => {
this.close();
},
onTakeFocus: (useSetActive) => {
this._dismissable.restoreFocus() ||
this._commandingSurface.takeFocus(useSetActive);
}
});
// Initialize public properties.
this.closedDisplayMode = _Constants.defaultClosedDisplayMode;
this.opened = this._isOpenedMode;
_Control.setOptions(this, options);
// Exit the Init state.
_ElementUtilities._inDom(this.element).then(() => {
return this._commandingSurface.initialized;
}).then(() => {
stateMachine.exitInit();
this._writeProfilerMark("constructor,StopTM");
});
}
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeopen" helpKeyword="WinJS.UI.ToolBar.onbeforeopen">
/// Occurs immediately before the control is opened. Is cancelable.
/// </field>
onbeforeopen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafteropen" helpKeyword="WinJS.UI.ToolBar.onafteropen">
/// Occurs immediately after the control is opened.
/// </field>
onafteropen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeclose" helpKeyword="WinJS.UI.ToolBar.onbeforeclose">
/// Occurs immediately before the control is closed. Is cancelable.
/// </field>
onbeforeclose: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafterclose" helpKeyword="WinJS.UI.ToolBar.onafterclose">
/// Occurs immediately after the control is closed.
/// </field>
onafterclose: (ev: CustomEvent) => void;
open(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.open">
/// <summary locid="WinJS.UI.ToolBar.open">
/// Opens the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.open();
}
close(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.close">
/// <summary locid="WinJS.UI.ToolBar.close">
/// Closes the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.close();
}
dispose() {
/// <signature helpKeyword="WinJS.UI.ToolBar.dispose">
/// <summary locid="WinJS.UI.ToolBar.dispose">
/// Disposes this ToolBar.
/// </summary>
/// </signature>
if (this._disposed) {
return;
}
this._disposed = true;
_LightDismissService.hidden(this._dismissable);
// Disposing the _commandingSurface will trigger dispose on its OpenCloseMachine and synchronously complete any animations that might have been running.
this._commandingSurface.dispose();
// If page navigation is happening, we don't want the ToolBar left behind in the body.
// Synchronoulsy close the ToolBar to force it out of the body and back into its parent element.
this._synchronousClose();
_ElementUtilities._inputPaneListener.removeEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
_Dispose.disposeSubTree(this.element);
}
forceLayout() {
/// <signature helpKeyword="WinJS.UI.ToolBar.forceLayout">
/// <summary locid="WinJS.UI.ToolBar.forceLayout">
/// Forces the ToolBar to update its layout. Use this function when the window did not change size, but the container of the ToolBar changed size.
/// </summary>
/// </signature>
this._commandingSurface.forceLayout();
}
| (id: string): _Command.ICommand {
/// <signature helpKeyword="WinJS.UI.ToolBar.getCommandById">
/// <summary locid="WinJS.UI.ToolBar.getCommandById">
/// Retrieves the command with the specified ID from this ToolBar.
/// If more than one command is found, this method returns the first command found.
/// </summary>
/// <param name="id" type="String" locid="WinJS.UI.ToolBar.getCommandById_p:id">Id of the command to return.</param>
/// <returns type="object" locid="WinJS.UI.ToolBar.getCommandById_returnValue">
/// The command found, or null if no command is found.
/// </returns>
/// </signature>
return this._commandingSurface.getCommandById(id);
}
showOnlyCommands(commands: Array<string|_Command.ICommand>): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.showOnlyCommands">
/// <summary locid="WinJS.UI.ToolBar.showOnlyCommands">
/// Show the specified commands, hiding all of the others in the ToolBar.
/// </summary>
/// <param name="commands" type="Array" locid="WinJS.UI.ToolBar.showOnlyCommands_p:commands">
/// An array of the commands to show. The array elements may be Command objects, or the string identifiers (IDs) of commands.
/// </param>
/// </signature>
return this._commandingSurface.showOnlyCommands(commands);
}
private _writeProfilerMark(text: string) {
_WriteProfilerMark("WinJS.UI.ToolBar:" + this._id + ":" + text);
}
private _initializeDom(root: HTMLElement): void {
this._writeProfilerMark("_intializeDom,info");
// Attaching JS control to DOM element
root["winControl"] = this;
this._id = root.id || _ElementUtilities._uniqueID(root);
_ElementUtilities.addClass(root, _Constants.ClassNames.controlCssClass);
_ElementUtilities.addClass(root, _Constants.ClassNames.disposableCssClass);
// Make sure we have an ARIA role
var role = root.getAttribute("role");
if (!role) {
root.setAttribute("role", "menubar");
}
var label = root.getAttribute("aria-label");
if (!label) {
root.setAttribute("aria-label", strings.ariaLabel);
}
// Create element for commandingSurface and reparent any declarative Commands.
// The CommandingSurface constructor will parse child elements as AppBarCommands.
var commandingSurfaceEl = document.createElement("DIV");
_ElementUtilities._reparentChildren(root, commandingSurfaceEl);
root.appendChild(commandingSurfaceEl);
// While the ToolBar is open, it will place itself in the <body> so it can become a light dismissible
// overlay. It leaves the placeHolder element behind as stand in at the ToolBar's original DOM location
// to avoid reflowing surrounding app content and create the illusion that the ToolBar hasn't moved along
// the x or y planes.
var placeHolder = _Global.document.createElement("DIV");
_ElementUtilities.addClass(placeHolder, _Constants.ClassNames.placeHolderCssClass);
// If the ToolBar's original HTML parent node is disposed while the ToolBar is open and repositioned as
// a temporary child of the <body>, make sure that calling dispose on the placeHolder element will trigger
// dispose on the ToolBar as well.
_Dispose.markDisposable(placeHolder, this.dispose.bind(this));
this._dom = {
root: root,
commandingSurfaceEl: commandingSurfaceEl,
placeHolder: placeHolder,
};
}
private _handleShowingKeyboard(event: { detail: { originalEvent: _WinRT.Windows.UI.ViewManagement.InputPaneVisibilityEventArgs } }) {
// Because the ToolBar takes up layout space and is not an overlay, it doesn't have the same expectation
// to move itself to get out of the way of a showing IHM. Instsead we just close the ToolBar to avoid
// scenarios where the ToolBar is occluded, but the click-eating-div is still present since it may seem
// strange to end users that an occluded ToolBar (out of sight, out of mind) is still eating their first
// click.
// Mitigation:
// Because (1) custom content in a ToolBar can only be included as a 'content' type command, because (2)
// the ToolBar only supports closedDisplayModes 'compact' and 'full', and because (3) 'content' type
// commands in the overflowarea use a separate contentflyout to display their contents:
// Interactable custom content contained within the ToolBar actionarea or overflowarea, will remain
// visible and interactable even when showing the IHM closes the ToolBar.
this.close();
}
private _synchronousOpen(): void {
this._isOpenedMode = true;
this._updateDomImpl();
}
private _synchronousClose(): void {
this._isOpenedMode = false;
this._updateDomImpl();
}
// State private to the _updateDomImpl family of method. No other methods should make use of it.
//
// Nothing has been rendered yet so these are all initialized to undefined. Because
// they are undefined, the first time _updateDomImpl is called, they will all be
// rendered.
private _updateDomImpl_renderedState = {
isOpenedMode: <boolean>undefined,
closedDisplayMode: <string>undefined,
prevInlineWidth: <string>undefined,
};
private _updateDomImpl(): void {
var rendered = this._updateDomImpl_renderedState;
if (rendered.isOpenedMode !== this._isOpenedMode) {
if (this._isOpenedMode) {
this._updateDomImpl_renderOpened();
} else {
this._updateDomImpl_renderClosed();
}
rendered.isOpenedMode = this._isOpenedMode;
}
if (rendered.closedDisplayMode !== this.closedDisplayMode) {
removeClass(this._dom.root, closedDisplayModeClassMap[rendered.closedDisplayMode]);
addClass(this._dom.root, closedDisplayModeClassMap[this.closedDisplayMode]);
rendered.closedDisplayMode = this.closedDisplayMode;
}
this._commandingSurface.updateDom();
}
private _getClosedHeight(): number {
if (this._cachedClosedHeight === null) {
var wasOpen = this._isOpenedMode;
if (this._isOpenedMode) {
this._synchronousClose();
}
this._cachedClosedHeight = this._commandingSurface.getBoundingRects().commandingSurface.height;
if (wasOpen) {
this._synchronousOpen();
}
}
return this._cachedClosedHeight;
}
private _updateDomImpl_renderOpened(): void {
// Measure closed state.
this._updateDomImpl_renderedState.prevInlineWidth = this._dom.root.style.width;
var closedBorderBox = this._dom.root.getBoundingClientRect();
var closedContentWidth = _ElementUtilities._getPreciseContentWidth(this._dom.root);
var closedContentHeight = _ElementUtilities._getPreciseContentHeight(this._dom.root);
var closedStyle = _ElementUtilities._getComputedStyle(this._dom.root);
var closedPaddingTop = _ElementUtilities._convertToPrecisePixels(closedStyle.paddingTop);
var closedBorderTop = _ElementUtilities._convertToPrecisePixels(closedStyle.borderTopWidth);
var closedMargins = _ElementUtilities._getPreciseMargins(this._dom.root);
var closedContentBoxTop = closedBorderBox.top + closedBorderTop + closedPaddingTop;
var closedContentBoxBottom = closedContentBoxTop + closedContentHeight;
// Size our placeHolder. Set height and width to match borderbox of the closed ToolBar.
// Copy ToolBar margins to the placeholder.
var placeHolder = this._dom.placeHolder;
var placeHolderStyle = placeHolder.style;
placeHolderStyle.width = closedBorderBox.width + "px";
placeHolderStyle.height = closedBorderBox.height + "px";
placeHolderStyle.marginTop = closedMargins.top + "px";
placeHolderStyle.marginRight = closedMargins.right + "px";
placeHolderStyle.marginBottom = closedMargins.bottom + "px";
placeHolderStyle.marginLeft = closedMargins.left + "px";
_ElementUtilities._maintainFocus(() => {
// Move ToolBar element to the body in preparation of becoming a light dismissible. Leave an equal sized placeHolder element
// at our original DOM location to avoid reflowing surrounding app content.
this._dom.root.parentElement.insertBefore(placeHolder, this._dom.root);
_Global.document.body.appendChild(this._dom.root);
// Position the ToolBar to completely cover the same region as the placeholder element.
this._dom.root.style.width = closedContentWidth + "px";
this._dom.root.style.left = closedBorderBox.left - closedMargins.left + "px";
// Determine which direction to expand the CommandingSurface elements when opened. The overflow area will be rendered at the corresponding edge of
// the ToolBar's content box, so we choose the direction that offers the most space between that edge and the corresponding edge of the viewport.
// This is to reduce the chance that the overflow area might clip through the edge of the viewport.
var topOfViewport = 0;
var bottomOfViewport = _Global.innerHeight;
var distanceFromTop = closedContentBoxTop - topOfViewport;
var distanceFromBottom = bottomOfViewport - closedContentBoxBottom;
if (distanceFromTop > distanceFromBottom) {
// CommandingSurface is going to expand updwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.top;
// Position the bottom edge of the ToolBar marginbox over the bottom edge of the placeholder marginbox.
this._dom.root.style.bottom = (bottomOfViewport - closedBorderBox.bottom) - closedMargins.bottom + "px";
} else {
// CommandingSurface is going to expand downwards.
this._commandingSurface.overflowDirection = _Constants.OverflowDirection.bottom;
// Position the top edge of the ToolBar marginbox over the top edge of the placeholder marginbox.
this._dom.root.style.top = (topOfViewport + closedBorderBox.top) - closedMargins.top + "px";
}
// Render opened state
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.openedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.closedClass);
});
this._commandingSurface.synchronousOpen();
_LightDismissService.shown(this._dismissable); // Call at the start of the open animation
}
private _updateDomImpl_renderClosed(): void {
_ElementUtilities._maintainFocus(() => {
if (this._dom.placeHolder.parentElement) {
// Restore our placement in the DOM
var placeHolder = this._dom.placeHolder;
placeHolder.parentElement.insertBefore(this._dom.root, placeHolder);
placeHolder.parentElement.removeChild(placeHolder);
}
// Render Closed
this._dom.root.style.top = "";
this._dom.root.style.right = "";
this._dom.root.style.bottom = "";
this._dom.root.style.left = "";
this._dom.root.style.width = this._updateDomImpl_renderedState.prevInlineWidth;
_ElementUtilities.addClass(this._dom.root, _Constants.ClassNames.closedClass);
_ElementUtilities.removeClass(this._dom.root, _Constants.ClassNames.openedClass);
});
this._commandingSurface.synchronousClose();
_LightDismissService.hidden(this._dismissable); // Call after the close animation
}
}
_Base.Class.mix(ToolBar, _Events.createEventProperties(
_Constants.EventNames.beforeOpen,
_Constants.EventNames.afterOpen,
_Constants.EventNames.beforeClose,
_Constants.EventNames.afterClose));
// addEventListener, removeEventListener, dispatchEvent
_Base.Class.mix(ToolBar, _Control.DOMEventMixin);
| getCommandById | identifier_name |
update.py | import json
import os
from pathlib import Path
from shutil import rmtree
from subprocess import DEVNULL, PIPE, CalledProcessError, run # nosec
from tempfile import TemporaryDirectory
from typing import Any, Dict, Optional, Set
import click
import typer
from cookiecutter.generate import generate_files
from git import Repo
from .utils import (
example,
generate_cookiecutter_context,
get_cookiecutter_repo,
get_cruft_file,
is_project_updated,
json_dumps,
)
try:
import toml # type: ignore
except ImportError: # pragma: no cover
toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
# from the current cruft state commit of the cookiectter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
def _view_diff(old_main_directory: Path, new_main_directory: Path):
run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)])
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def | (diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
# Git 3 way merge is the our best bet
# at applying patches. But it only works
# with git repos. If the repo is not a git dir
# we fall back to git apply --reject which applies
# diffs cleanly where applicable otherwise creates
# *.rej files where there are conflicts
if _is_git_repo(expanded_dir_path):
_apply_three_way_patch(diff, expanded_dir_path)
else:
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_project_updates(
old_main_directory: Path,
new_main_directory: Path,
project_dir: Path,
skip_update: bool,
skip_apply_ask: bool,
) -> bool:
diff = _get_diff(old_main_directory, new_main_directory)
if not skip_apply_ask and not skip_update:
input_str: str = "v"
while input_str == "v":
typer.echo(
'Respond with "s" to intentionally skip the update while marking '
"your project as up-to-date or "
'respond with "v" to view the changes that will be applied.'
)
input_str = typer.prompt(
"Apply diff and update?",
type=click.Choice(("y", "n", "s", "v")),
show_choices=True,
default="y",
)
if input_str == "v":
if diff.strip():
_view_diff(old_main_directory, new_main_directory)
else:
click.secho("There are no changes.", fg=typer.colors.YELLOW)
if input_str == "n":
typer.echo("User cancelled Cookiecutter template update.")
return False
elif input_str == "s":
skip_update = True
if not skip_update and diff.strip():
_apply_patch(diff, project_dir)
return True
| _apply_three_way_patch | identifier_name |
update.py | import json
import os
from pathlib import Path
from shutil import rmtree
from subprocess import DEVNULL, PIPE, CalledProcessError, run # nosec
from tempfile import TemporaryDirectory
from typing import Any, Dict, Optional, Set
import click
import typer
from cookiecutter.generate import generate_files
from git import Repo
from .utils import (
example,
generate_cookiecutter_context,
get_cookiecutter_repo,
get_cruft_file,
is_project_updated,
json_dumps,
)
try:
import toml # type: ignore
except ImportError: # pragma: no cover
toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
# from the current cruft state commit of the cookiectter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
def _view_diff(old_main_directory: Path, new_main_directory: Path):
|
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def _apply_three_way_patch(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
# Git 3 way merge is the our best bet
# at applying patches. But it only works
# with git repos. If the repo is not a git dir
# we fall back to git apply --reject which applies
# diffs cleanly where applicable otherwise creates
# *.rej files where there are conflicts
if _is_git_repo(expanded_dir_path):
_apply_three_way_patch(diff, expanded_dir_path)
else:
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_project_updates(
old_main_directory: Path,
new_main_directory: Path,
project_dir: Path,
skip_update: bool,
skip_apply_ask: bool,
) -> bool:
diff = _get_diff(old_main_directory, new_main_directory)
if not skip_apply_ask and not skip_update:
input_str: str = "v"
while input_str == "v":
typer.echo(
'Respond with "s" to intentionally skip the update while marking '
"your project as up-to-date or "
'respond with "v" to view the changes that will be applied.'
)
input_str = typer.prompt(
"Apply diff and update?",
type=click.Choice(("y", "n", "s", "v")),
show_choices=True,
default="y",
)
if input_str == "v":
if diff.strip():
_view_diff(old_main_directory, new_main_directory)
else:
click.secho("There are no changes.", fg=typer.colors.YELLOW)
if input_str == "n":
typer.echo("User cancelled Cookiecutter template update.")
return False
elif input_str == "s":
skip_update = True
if not skip_update and diff.strip():
_apply_patch(diff, project_dir)
return True
| run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)]) | identifier_body |
update.py | import json
import os
from pathlib import Path
from shutil import rmtree
from subprocess import DEVNULL, PIPE, CalledProcessError, run # nosec
from tempfile import TemporaryDirectory
from typing import Any, Dict, Optional, Set
import click
import typer
from cookiecutter.generate import generate_files
from git import Repo
from .utils import (
example,
generate_cookiecutter_context,
get_cookiecutter_repo,
get_cruft_file,
is_project_updated,
json_dumps,
)
try:
import toml # type: ignore
except ImportError: # pragma: no cover
toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
# from the current cruft state commit of the cookiectter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
def _view_diff(old_main_directory: Path, new_main_directory: Path):
run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)])
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def _apply_three_way_patch(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
# Git 3 way merge is the our best bet
# at applying patches. But it only works
# with git repos. If the repo is not a git dir
# we fall back to git apply --reject which applies
# diffs cleanly where applicable otherwise creates
# *.rej files where there are conflicts
if _is_git_repo(expanded_dir_path):
|
else:
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_project_updates(
old_main_directory: Path,
new_main_directory: Path,
project_dir: Path,
skip_update: bool,
skip_apply_ask: bool,
) -> bool:
diff = _get_diff(old_main_directory, new_main_directory)
if not skip_apply_ask and not skip_update:
input_str: str = "v"
while input_str == "v":
typer.echo(
'Respond with "s" to intentionally skip the update while marking '
"your project as up-to-date or "
'respond with "v" to view the changes that will be applied.'
)
input_str = typer.prompt(
"Apply diff and update?",
type=click.Choice(("y", "n", "s", "v")),
show_choices=True,
default="y",
)
if input_str == "v":
if diff.strip():
_view_diff(old_main_directory, new_main_directory)
else:
click.secho("There are no changes.", fg=typer.colors.YELLOW)
if input_str == "n":
typer.echo("User cancelled Cookiecutter template update.")
return False
elif input_str == "s":
skip_update = True
if not skip_update and diff.strip():
_apply_patch(diff, project_dir)
return True
| _apply_three_way_patch(diff, expanded_dir_path) | conditional_block |
update.py | import json
import os
from pathlib import Path
from shutil import rmtree
from subprocess import DEVNULL, PIPE, CalledProcessError, run # nosec
from tempfile import TemporaryDirectory
from typing import Any, Dict, Optional, Set
import click
import typer
from cookiecutter.generate import generate_files
from git import Repo
from .utils import (
example, | json_dumps,
)
try:
import toml # type: ignore
except ImportError: # pragma: no cover
toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
# from the current cruft state commit of the cookiectter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
def _view_diff(old_main_directory: Path, new_main_directory: Path):
run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)])
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def _apply_three_way_patch(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
# Git 3 way merge is the our best bet
# at applying patches. But it only works
# with git repos. If the repo is not a git dir
# we fall back to git apply --reject which applies
# diffs cleanly where applicable otherwise creates
# *.rej files where there are conflicts
if _is_git_repo(expanded_dir_path):
_apply_three_way_patch(diff, expanded_dir_path)
else:
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_project_updates(
old_main_directory: Path,
new_main_directory: Path,
project_dir: Path,
skip_update: bool,
skip_apply_ask: bool,
) -> bool:
diff = _get_diff(old_main_directory, new_main_directory)
if not skip_apply_ask and not skip_update:
input_str: str = "v"
while input_str == "v":
typer.echo(
'Respond with "s" to intentionally skip the update while marking '
"your project as up-to-date or "
'respond with "v" to view the changes that will be applied.'
)
input_str = typer.prompt(
"Apply diff and update?",
type=click.Choice(("y", "n", "s", "v")),
show_choices=True,
default="y",
)
if input_str == "v":
if diff.strip():
_view_diff(old_main_directory, new_main_directory)
else:
click.secho("There are no changes.", fg=typer.colors.YELLOW)
if input_str == "n":
typer.echo("User cancelled Cookiecutter template update.")
return False
elif input_str == "s":
skip_update = True
if not skip_update and diff.strip():
_apply_patch(diff, project_dir)
return True | generate_cookiecutter_context,
get_cookiecutter_repo,
get_cruft_file,
is_project_updated, | random_line_split |
main.rs | //#![no_std]
#![feature(termination_trait)]
extern crate megaton_hammer;
extern crate byteorder;
extern crate image;
extern crate math;
use megaton_hammer::ipcdefs as megaton_ipc;
use byteorder::{ReadBytesExt, WriteBytesExt, LE, ByteOrder};
use megaton_hammer::kernel::{TransferMemory, KObject, FromKObject, Event, svc};
use megaton_ipc::{nn, nns};
use std::io::{Seek, SeekFrom, Cursor};
//use image::png::PNGDecoder;
use image::bmp::BMPDecoder;
use image::{Pixel, ImageDecoder};
// TODO: This kind of sucks. And is only a problem because my IPC bindings don't
// have a concept of strings yet. We need to fix this.
pub fn u8_slice_to_i8_slice(slice: &[u8]) -> &[i8] {
unsafe { &*(slice as *const _ as *const [i8]) }
}
#[derive(Debug)]
enum MyError {
MegatonError(megaton_hammer::error::Error),
ImageError(image::ImageError),
IoctlError(u32),
ParcelError(u32)
}
impl From<image::ImageError> for MyError {
fn from(err: image::ImageError) -> MyError {
MyError::ImageError(err)
}
}
impl From<megaton_hammer::error::Error> for MyError {
fn from(err: megaton_hammer::error::Error) -> MyError {
MyError::MegatonError(err)
}
}
fn main() -> std::result::Result<(), MyError> {
// Let's get ferris to show up on my switch.
println!("Initialize NV");
let nvdrv = nns::nvdrv::INvDrvServices::new_nvdrv_a(|cb| {
println!("Create transfer memory");
let transfer_mem = TransferMemory::new(0x30000)?;
// TODO: Find a better way.
let temporary_process = unsafe { KObject::new(megaton_hammer::kernel::svc::CURRENT_PROCESS) };
let ret = cb(0x30000, &temporary_process, transfer_mem.as_ref());
unsafe { std::mem::forget(temporary_process); }
ret
})?;
println!("Open /dev/nvhost-as-gpu");
let (nvasgpu, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvhost-as-gpu"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Open /dev/nvmap");
let (nvmap, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvmap"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Initialize vi");
let vi_m = nn::visrv::sf::IManagerRootService::new()?;
println!("get_display_service");
let disp_svc = vi_m.get_display_service(1)?;
println!("get_relay_service");
let relay_svc = disp_svc.get_relay_service()?;
println!("get_system_display_service");
let system_disp_svc = disp_svc.get_system_display_service()?;
println!("get_manager_display_service");
let manager_disp_svc = disp_svc.get_manager_display_service()?;
println!("Open display");
let display_id = {
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
disp_svc.open_display(display)?
};
println!("Open a layer");
let layer_id = manager_disp_svc.create_managed_layer(1, display_id, 0)?;
let binder_id = {
let mut parcel = RawParcel::default();
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
let _window_size = disp_svc.open_layer(display, layer_id, 0, parcel.as_bytes_mut())?;
let mut reader = parcel.into_parcel_reader();
let fbo = FlatBinderObject::from_parcel(&mut reader);
let binder = fbo.inner as i32;
relay_svc.adjust_refcount(binder, 1, 0)?;
relay_svc.adjust_refcount(binder, 1, 1)?;
binder
};
// Connect to the IGBP. Take a look at the following link for reference.
// https://android.googlesource.com/platform/frameworks/native/+/e2786ea5aec3a12d948feb85ffbb535fc89c0fe6/libs/gui/IGraphicBufferProducer.cpp#297
println!("Connect to the IGBP");
let queue_buffer_output = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(0); // IProducerListener is null because we don't support it in MegatonHammer (nor in libt) yet.
parcel.write_u32(2); // API
parcel.write_u32(0); // ProducerControlledByApp.
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, CONNECT, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut reader = parcel_out.into_parcel_reader();
let qbo = QueueBufferOutput::from_parcel(&mut reader);
if reader.read_u32() != 0 {
println!("Failed to connect to igbp");
return Ok(());
}
qbo
};
println!("Allocate framebuffers");
let mut mem : Vec<BufferMemory> = Vec::with_capacity(3);
unsafe { mem.set_len(3); }
// Disables caching when talking to the gpu.
unsafe { svc::set_memory_attribute(mem.as_mut_ptr() as _, mem.len() * std::mem::size_of::<BufferMemory>(), 0x8, 0x8).expect("Failed to set memory attribute"); }
let gpu_buffer = {
let mut create = NvMapIocCreateArgs {
size: (mem.len() * std::mem::size_of::<BufferMemory>()) as u32,
handle: 0
};
println!("NVMAP_IOC_CREATE {:?} ({:?})", create, unsafe { std::mem::transmute::<&NvMapIocCreateArgs, &[u8; std::mem::size_of::<NvMapIocCreateArgs>()]>(&create) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_CREATE,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&create as *const NvMapIocCreateArgs as *const u8, std::mem::size_of::<NvMapIocCreateArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut create as *mut NvMapIocCreateArgs as *mut u8, std::mem::size_of::<NvMapIocCreateArgs>()) })?;
if ret != 0 |
GpuBuffer {
nvmap_handle: create.handle,
size: mem.len() * std::mem::size_of::<BufferMemory>(),
alignment: 0x1000,
kind: 0
}
};
let buffers = {
let mut alloc = NvMapIocAllocArgs {
handle: gpu_buffer.nvmap_handle,
heapmask: 0,
flags: 0,
align: gpu_buffer.alignment,
kind: gpu_buffer.kind,
pad: [0; 7],
addr: mem.as_mut_ptr() as u64
};
println!("NVMAP_IOC_ALLOC {:?} ({:?})", alloc, unsafe { std::mem::transmute::<&NvMapIocAllocArgs, &[u8; std::mem::size_of::<NvMapIocAllocArgs>()]>(&alloc) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_ALLOC,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&alloc as *const NvMapIocAllocArgs as *const u8, std::mem::size_of::<NvMapIocAllocArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut alloc as *mut NvMapIocAllocArgs as *mut u8, std::mem::size_of::<NvMapIocAllocArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
let mut buffers = Vec::with_capacity(3);
for i in 0..3 {
buffers.push(GraphicBuffer {
width: queue_buffer_output.width,
height: queue_buffer_output.height,
stride: queue_buffer_output.width,
format: 1, // RGBA_8888
usage: 0xb00, // TODO: Wat?
gpu_buffer: &gpu_buffer,
index: i,
offset_gpu_buffer: 0x3c0000 * i
});
}
buffers
};
println!("Tell IGBP about the buffers");
for (i, buf) in buffers.iter().enumerate() {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(i as u32); // slot
parcel.write_u32(1); // Unknown
parcel.write_u32(0x16c); // Flattened GraphicsBuffer length
parcel.write_u32(0); // Unknown
buf.write_to_parcel(&mut parcel);
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, SET_PREALLOCATED_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
}
println!("Set scaling mode");
disp_svc.set_layer_scaling_mode(2, layer_id)?;
println!("Add layer to stack");
for stack in [0x0, 0x2, 0x4, 0x5, 0xA].iter() {
manager_disp_svc.add_to_layer_stack(*stack, layer_id)?;
}
println!("Set Z layer");
system_disp_svc.set_layer_z(layer_id, 2)?;
println!("Loading image from FERRIS");
let image = BMPDecoder::new(Cursor::new(&FERRIS[..]));
println!("Getting frame");
let frame = image.into_frames()?.next().unwrap().into_buffer();
//println!("Resizing FERRIS");
//let frame = image::imageops::resize(&image.into_frames()?.next().unwrap().into_buffer(), 1280, 760, image::FilterType::Lanczos3);
let vevent = unsafe { Event::from_kobject(disp_svc.get_display_vsync_event(display_id)?) };
for _ in 0..60 {
println!("Dequeue buffer");
let slot = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(1); // Pixel format
parcel.write_u32(1280); // width
parcel.write_u32(720); // height
parcel.write_u32(0); // get_frame_timestamp
parcel.write_u32(0xb00); // usage
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, DEQUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
let mut parcel_out = parcel_out.into_parcel_reader();
let slot = parcel_out.read_u32();
// Read fence
parcel_out.0.seek(SeekFrom::Current(44));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
slot
};
// Request buffer if it hasn't been requested already.
println!("Request buffer {}", slot);
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, REQUEST_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
let non_null = parcel_out.read_u32() != 0;
if non_null {
let len = parcel_out.read_u32();
if len != 0x16c {
println!("Invalid length: {}", len);
return Ok(())
}
let unk = parcel_out.read_u32();
// TODO: Get graphicbuffer.
parcel_out.0.seek(SeekFrom::Current(0x16c));
}
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
// Blit
println!("Blit");
{
fn pdep(mask: u32, mut value: u32) -> u32 {
let mut out = 0;
for shift in 0..32 {
let bit = 1 << shift;
if mask & bit != 0 {
if value & 1 != 0 {
out |= bit
}
value >>= 1;
}
}
out
}
fn swizzle_x(v: u32) -> u32 { pdep(!0x7B4, v) }
fn swizzle_y(v: u32) -> u32 { pdep(0x7B4, v) }
let x0 = 0;
let y0 = 0;
let mut offs_x0 = swizzle_x(x0);
let mut offs_y = swizzle_y(y0);
let x_mask = swizzle_x(!0);
let y_mask = swizzle_y(!0);
let incr_y = swizzle_x(128 * 10);
let tile_height = 128;
offs_x0 += incr_y * (y0 / tile_height);
// TODO: Add clipping.
for y in 0..frame.height() {
let mut offs_x = offs_x0;
for x in 0..frame.width() {
let pixel = frame.get_pixel(x, y);
mem[slot as usize][offs_y as usize + offs_x as usize] = LE::read_u32(pixel.channels());
offs_x = offs_x.wrapping_sub(x_mask) & x_mask;
}
offs_y = offs_y.wrapping_sub(y_mask) & y_mask;
if offs_y == 0 {
offs_x0 += incr_y; // wrap into next tile row
}
}
}
// Enqueue buffer
println!("Enqueue buffer");
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
parcel.write_u32(0x54); parcel.write_u32(0); // unknown, but always those values
parcel.write_u32(0x588bbba9); parcel.write_u32(0); // Timestamp, u64
parcel.write_u32(1); // unknown, but always those values
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // sometimes zero
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // Also seen 2
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(1); // fence?
parcel.write_u32(1);
parcel.write_u32(0xa3);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, QUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
println!("{:?}", QueueBufferOutput::from_parcel(&mut parcel_out));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
vevent.wait()?;
vevent.reset()?;
}
Ok(())
}
//static FERRIS : &'static [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
//static FERRIS: &'static [u8; 33061] = include_bytes!("../img/ferris.png");
static FERRIS: &'static [u8; 153718] = include_bytes!("../img/ferris.bmp");
// Graphic buffer stuff
//struct IGraphicBufferProducer(Arc<IHOSBinderDriver>, u32);
//
//impl IGraphicBufferProducer {
// pub fn dequeue_buffer(&self) {
//
// }
//}
//struct Display(Arc<IManagerDisplayService>, u64);
//
//impl Drop for Display {
// fn drop(&mut self) {
// self.0.close_display(self.1);
// }
//}
// TODO: Layer trait?
//struct ManagedLayer(Arc<IManagerDisplayService>, u64);
//
//impl Drop for ManagedLayer {
// fn drop(&mut self) {
// self.0.destroy_managed_layer(self.1);
// }
//}
/// Binder object in a parcel
#[repr(C)]
#[derive(Debug)]
struct FlatBinderObject {
ty: u32,
flags: u32,
inner: usize, // Can either be a void *binder or a u32 handle
cookie: usize
}
impl FlatBinderObject {
fn from_parcel(parcel: &mut ParcelReader) -> FlatBinderObject {
FlatBinderObject {
ty: parcel.read_u32(),
flags: parcel.read_u32(),
inner: parcel.read_u64() as usize,
cookie: parcel.read_u64() as usize
}
}
}
// Returned by igbp_connect
#[repr(C)]
#[derive(Debug)]
struct QueueBufferOutput {
width: u32,
height: u32,
transform_hint: u32,
num_pending_buffers: u32
}
impl QueueBufferOutput {
fn from_parcel(parcel: &mut ParcelReader) -> QueueBufferOutput {
let width = parcel.read_u32();
let height = parcel.read_u32();
let transform_hint = parcel.read_u32();
let num_pending_buffers = parcel.read_u32();
QueueBufferOutput {
width, height, transform_hint, num_pending_buffers
}
}
}
#[repr(C)]
struct GraphicBuffer<'a> {
width: u32,
height: u32,
stride: u32,
format: u32,
usage: u32,
gpu_buffer: &'a GpuBuffer,
index: u32,
offset_gpu_buffer: u32,
}
impl<'a> GraphicBuffer<'a> {
fn write_to_parcel(&self, parcel: &mut OwnedParcel) {
}
}
#[repr(C)]
struct GpuBuffer {
nvmap_handle: u32,
size: usize,
alignment: u32,
kind: u8
}
// nvmap stuff
#[repr(C, align(4096))]
struct BufferMemory([u32; 0x3c0000/4]);
impl std::ops::Deref for BufferMemory {
type Target = [u32];
fn deref(&self) -> &[u32] {
&self.0[..]
}
}
impl std::ops::DerefMut for BufferMemory {
fn deref_mut(&mut self) -> &mut [u32] {
&mut self.0[..]
}
}
const NVMAP_IOC_CREATE: u32 = 0xC0080101;
const NVMAP_IOC_FROM_ID: u32 = 0xC0080103;
const NVMAP_IOC_ALLOC: u32 = 0xC0200104;
const NVMAP_IOC_FREE: u32 = 0xC0180105;
const NVMAP_IOC_PARAM: u32 = 0xC00C0109;
const NVMAP_IOC_GET_ID: u32 = 0xC008010E;
#[repr(C)]
#[derive(Debug)]
struct NvMapIocCreateArgs{
/// In, size of the buffer in bytes
size: u32,
/// Out, handle to use for other operations
handle: u32
}
#[repr(C)]
#[derive(Debug)]
struct NvMapIocAllocArgs {
handle: u32,
heapmask: u32,
/// (0=read-only, 1=read-write)
flags: u32,
align: u32,
kind: u8,
pad: [u8; 7],
addr: u64,
}
// vi stuff. I should reuse some code from rust-binder, instead of rolling my
// own again.
const REQUEST_BUFFER: u32 = 0x1;
const SET_BUFFER_COUNT: u32 = 0x2;
const DEQUEUE_BUFFER: u32 = 0x3;
const DETACH_BUFFER: u32 = 0x4;
const DETACH_NEXT_BUFFER: u32 = 0x5;
const ATTACH_BUFFER: u32 = 0x6;
const QUEUE_BUFFER: u32 = 0x7;
const CANCEL_BUFFER: u32 = 0x8;
const QUERY: u32 = 0x9;
const CONNECT: u32 = 0xA;
const DISCONNECT: u32 = 0xB;
// 0xC might be SET_SIDEBAND_STREAM but I'm not sure
const ALLOCATE_BUFFERS: u32 = 0xD;
const SET_PREALLOCATED_BUFFER: u32 = 0xE;
#[derive(Debug)]
struct OwnedParcel(Vec<u8>);
impl OwnedParcel {
pub fn new() -> OwnedParcel {
OwnedParcel(Vec::new())
}
pub fn write_u32(&mut self, data: u32) {
self.0.write_u32::<LE>(data).unwrap();
}
pub fn write_string16(&mut self, s: &str) {
let encoded_s_count = s.encode_utf16().count();
self.write_u32(encoded_s_count as u32);
for c in s.encode_utf16() {
self.0.write_u16::<LE>(c).unwrap();
}
// zero-terminated
self.0.write_u16::<LE>(0).unwrap();
// Padding
if (encoded_s_count + 1) % 2 == 1 {
self.0.write_u16::<LE>(0).unwrap();
}
}
pub fn write_interface_token(&mut self, token: &str) {
self.write_u32(0x100);
self.write_string16(token);
}
pub fn build(self) -> RawParcel {
let mut parcel = RawParcel {
data_size: self.0.len() as u32,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
};
parcel.payload[..self.0.len()].copy_from_slice(&self.0[..]);
parcel
}
}
#[derive(Debug)]
struct ParcelReader(std::io::Cursor<Vec<u8>>);
impl ParcelReader {
pub fn read_u32(&mut self) -> u32 {
self.0.read_u32::<LE>().unwrap()
}
pub fn read_u64(&mut self) -> u64 {
self.0.read_u64::<LE>().unwrap()
}
}
#[repr(C)]
struct RawParcel {
data_size: u32,
data_offset: u32,
objects_size: u32,
objects_offset: u32,
payload: [u8; 0x200]
}
impl std::fmt::Debug for RawParcel {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
let mut s = f.debug_struct("RawParcel");
s.field("data_size", &self.data_size)
.field("data_offset", &self.data_offset)
.field("objects_size", &self.objects_size)
.field("objects_offset", &self.objects_offset)
.field("payload", &&self.payload[..])
.finish()
}
}
impl Default for RawParcel {
fn default() -> RawParcel {
RawParcel {
data_size: 0x200,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
}
}
}
impl RawParcel {
pub fn as_bytes(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self as *const RawParcel as *const u8, 0x10 + self.data_size as usize) }
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self as *mut RawParcel as *mut u8, std::mem::size_of::<RawParcel>()) }
}
pub fn into_parcel_reader(self) -> ParcelReader {
ParcelReader(std::io::Cursor::new(Vec::from(&self.payload[(self.data_offset - 0x10) as usize..(self.data_offset - 0x10 + self.data_size) as usize])))
}
}
| {
return Err(MyError::IoctlError(ret));
} | conditional_block |
main.rs | //#![no_std]
#![feature(termination_trait)]
extern crate megaton_hammer;
extern crate byteorder;
extern crate image;
extern crate math;
use megaton_hammer::ipcdefs as megaton_ipc;
use byteorder::{ReadBytesExt, WriteBytesExt, LE, ByteOrder};
use megaton_hammer::kernel::{TransferMemory, KObject, FromKObject, Event, svc};
use megaton_ipc::{nn, nns};
use std::io::{Seek, SeekFrom, Cursor};
//use image::png::PNGDecoder;
use image::bmp::BMPDecoder;
use image::{Pixel, ImageDecoder};
// TODO: This kind of sucks. And is only a problem because my IPC bindings don't
// have a concept of strings yet. We need to fix this.
pub fn u8_slice_to_i8_slice(slice: &[u8]) -> &[i8] {
unsafe { &*(slice as *const _ as *const [i8]) }
}
#[derive(Debug)]
enum MyError {
MegatonError(megaton_hammer::error::Error),
ImageError(image::ImageError),
IoctlError(u32),
ParcelError(u32)
}
impl From<image::ImageError> for MyError {
fn from(err: image::ImageError) -> MyError {
MyError::ImageError(err)
}
}
impl From<megaton_hammer::error::Error> for MyError {
fn from(err: megaton_hammer::error::Error) -> MyError {
MyError::MegatonError(err)
}
}
fn main() -> std::result::Result<(), MyError> {
// Let's get ferris to show up on my switch.
println!("Initialize NV");
let nvdrv = nns::nvdrv::INvDrvServices::new_nvdrv_a(|cb| {
println!("Create transfer memory");
let transfer_mem = TransferMemory::new(0x30000)?;
// TODO: Find a better way.
let temporary_process = unsafe { KObject::new(megaton_hammer::kernel::svc::CURRENT_PROCESS) };
let ret = cb(0x30000, &temporary_process, transfer_mem.as_ref());
unsafe { std::mem::forget(temporary_process); }
ret
})?;
println!("Open /dev/nvhost-as-gpu");
let (nvasgpu, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvhost-as-gpu"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Open /dev/nvmap");
let (nvmap, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvmap"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Initialize vi");
let vi_m = nn::visrv::sf::IManagerRootService::new()?;
println!("get_display_service");
let disp_svc = vi_m.get_display_service(1)?;
println!("get_relay_service");
let relay_svc = disp_svc.get_relay_service()?;
println!("get_system_display_service");
let system_disp_svc = disp_svc.get_system_display_service()?;
println!("get_manager_display_service");
let manager_disp_svc = disp_svc.get_manager_display_service()?;
println!("Open display");
let display_id = {
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
disp_svc.open_display(display)?
};
println!("Open a layer");
let layer_id = manager_disp_svc.create_managed_layer(1, display_id, 0)?;
let binder_id = {
let mut parcel = RawParcel::default();
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
let _window_size = disp_svc.open_layer(display, layer_id, 0, parcel.as_bytes_mut())?;
let mut reader = parcel.into_parcel_reader();
let fbo = FlatBinderObject::from_parcel(&mut reader);
let binder = fbo.inner as i32;
relay_svc.adjust_refcount(binder, 1, 0)?;
relay_svc.adjust_refcount(binder, 1, 1)?;
binder
};
// Connect to the IGBP. Take a look at the following link for reference.
// https://android.googlesource.com/platform/frameworks/native/+/e2786ea5aec3a12d948feb85ffbb535fc89c0fe6/libs/gui/IGraphicBufferProducer.cpp#297
println!("Connect to the IGBP");
let queue_buffer_output = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(0); // IProducerListener is null because we don't support it in MegatonHammer (nor in libt) yet.
parcel.write_u32(2); // API
parcel.write_u32(0); // ProducerControlledByApp.
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, CONNECT, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut reader = parcel_out.into_parcel_reader();
let qbo = QueueBufferOutput::from_parcel(&mut reader);
if reader.read_u32() != 0 {
println!("Failed to connect to igbp");
return Ok(());
}
qbo
};
println!("Allocate framebuffers");
let mut mem : Vec<BufferMemory> = Vec::with_capacity(3);
unsafe { mem.set_len(3); }
// Disables caching when talking to the gpu.
unsafe { svc::set_memory_attribute(mem.as_mut_ptr() as _, mem.len() * std::mem::size_of::<BufferMemory>(), 0x8, 0x8).expect("Failed to set memory attribute"); }
let gpu_buffer = {
let mut create = NvMapIocCreateArgs {
size: (mem.len() * std::mem::size_of::<BufferMemory>()) as u32,
handle: 0
};
println!("NVMAP_IOC_CREATE {:?} ({:?})", create, unsafe { std::mem::transmute::<&NvMapIocCreateArgs, &[u8; std::mem::size_of::<NvMapIocCreateArgs>()]>(&create) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_CREATE,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&create as *const NvMapIocCreateArgs as *const u8, std::mem::size_of::<NvMapIocCreateArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut create as *mut NvMapIocCreateArgs as *mut u8, std::mem::size_of::<NvMapIocCreateArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
GpuBuffer {
nvmap_handle: create.handle,
size: mem.len() * std::mem::size_of::<BufferMemory>(),
alignment: 0x1000,
kind: 0
}
};
let buffers = {
let mut alloc = NvMapIocAllocArgs {
handle: gpu_buffer.nvmap_handle,
heapmask: 0,
flags: 0,
align: gpu_buffer.alignment,
kind: gpu_buffer.kind,
pad: [0; 7],
addr: mem.as_mut_ptr() as u64
};
println!("NVMAP_IOC_ALLOC {:?} ({:?})", alloc, unsafe { std::mem::transmute::<&NvMapIocAllocArgs, &[u8; std::mem::size_of::<NvMapIocAllocArgs>()]>(&alloc) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_ALLOC,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&alloc as *const NvMapIocAllocArgs as *const u8, std::mem::size_of::<NvMapIocAllocArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut alloc as *mut NvMapIocAllocArgs as *mut u8, std::mem::size_of::<NvMapIocAllocArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
let mut buffers = Vec::with_capacity(3);
for i in 0..3 {
buffers.push(GraphicBuffer {
width: queue_buffer_output.width,
height: queue_buffer_output.height,
stride: queue_buffer_output.width,
format: 1, // RGBA_8888
usage: 0xb00, // TODO: Wat?
gpu_buffer: &gpu_buffer,
index: i,
offset_gpu_buffer: 0x3c0000 * i
});
}
buffers
};
println!("Tell IGBP about the buffers");
for (i, buf) in buffers.iter().enumerate() {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(i as u32); // slot
parcel.write_u32(1); // Unknown
parcel.write_u32(0x16c); // Flattened GraphicsBuffer length
parcel.write_u32(0); // Unknown
buf.write_to_parcel(&mut parcel);
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, SET_PREALLOCATED_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
}
println!("Set scaling mode");
disp_svc.set_layer_scaling_mode(2, layer_id)?;
println!("Add layer to stack");
for stack in [0x0, 0x2, 0x4, 0x5, 0xA].iter() {
manager_disp_svc.add_to_layer_stack(*stack, layer_id)?;
}
println!("Set Z layer");
system_disp_svc.set_layer_z(layer_id, 2)?;
println!("Loading image from FERRIS");
let image = BMPDecoder::new(Cursor::new(&FERRIS[..]));
println!("Getting frame");
let frame = image.into_frames()?.next().unwrap().into_buffer();
//println!("Resizing FERRIS");
//let frame = image::imageops::resize(&image.into_frames()?.next().unwrap().into_buffer(), 1280, 760, image::FilterType::Lanczos3);
let vevent = unsafe { Event::from_kobject(disp_svc.get_display_vsync_event(display_id)?) };
for _ in 0..60 {
println!("Dequeue buffer");
let slot = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(1); // Pixel format
parcel.write_u32(1280); // width
parcel.write_u32(720); // height
parcel.write_u32(0); // get_frame_timestamp
parcel.write_u32(0xb00); // usage
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, DEQUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
let mut parcel_out = parcel_out.into_parcel_reader();
let slot = parcel_out.read_u32();
// Read fence
parcel_out.0.seek(SeekFrom::Current(44));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
slot
};
// Request buffer if it hasn't been requested already.
println!("Request buffer {}", slot);
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, REQUEST_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
let non_null = parcel_out.read_u32() != 0;
if non_null {
let len = parcel_out.read_u32();
if len != 0x16c {
println!("Invalid length: {}", len);
return Ok(())
}
let unk = parcel_out.read_u32();
// TODO: Get graphicbuffer.
parcel_out.0.seek(SeekFrom::Current(0x16c));
}
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
// Blit
println!("Blit");
{
fn pdep(mask: u32, mut value: u32) -> u32 {
let mut out = 0;
for shift in 0..32 {
let bit = 1 << shift;
if mask & bit != 0 {
if value & 1 != 0 {
out |= bit
}
value >>= 1;
}
}
out
}
fn swizzle_x(v: u32) -> u32 { pdep(!0x7B4, v) }
fn swizzle_y(v: u32) -> u32 { pdep(0x7B4, v) }
let x0 = 0;
let y0 = 0;
let mut offs_x0 = swizzle_x(x0);
let mut offs_y = swizzle_y(y0);
let x_mask = swizzle_x(!0);
let y_mask = swizzle_y(!0);
let incr_y = swizzle_x(128 * 10);
let tile_height = 128;
offs_x0 += incr_y * (y0 / tile_height);
// TODO: Add clipping.
for y in 0..frame.height() {
let mut offs_x = offs_x0;
for x in 0..frame.width() {
let pixel = frame.get_pixel(x, y);
mem[slot as usize][offs_y as usize + offs_x as usize] = LE::read_u32(pixel.channels());
offs_x = offs_x.wrapping_sub(x_mask) & x_mask;
}
offs_y = offs_y.wrapping_sub(y_mask) & y_mask;
if offs_y == 0 {
offs_x0 += incr_y; // wrap into next tile row
}
}
}
// Enqueue buffer
println!("Enqueue buffer");
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
parcel.write_u32(0x54); parcel.write_u32(0); // unknown, but always those values
parcel.write_u32(0x588bbba9); parcel.write_u32(0); // Timestamp, u64
parcel.write_u32(1); // unknown, but always those values
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // sometimes zero
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // Also seen 2
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(1); // fence?
parcel.write_u32(1);
parcel.write_u32(0xa3);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, QUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
println!("{:?}", QueueBufferOutput::from_parcel(&mut parcel_out));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
vevent.wait()?;
vevent.reset()?;
}
Ok(())
}
//static FERRIS : &'static [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
//static FERRIS: &'static [u8; 33061] = include_bytes!("../img/ferris.png");
static FERRIS: &'static [u8; 153718] = include_bytes!("../img/ferris.bmp");
// Graphic buffer stuff
//struct IGraphicBufferProducer(Arc<IHOSBinderDriver>, u32);
//
//impl IGraphicBufferProducer {
// pub fn dequeue_buffer(&self) {
//
// }
//}
//struct Display(Arc<IManagerDisplayService>, u64);
//
//impl Drop for Display {
// fn drop(&mut self) {
// self.0.close_display(self.1);
// }
//}
// TODO: Layer trait?
//struct ManagedLayer(Arc<IManagerDisplayService>, u64);
//
//impl Drop for ManagedLayer {
// fn drop(&mut self) {
// self.0.destroy_managed_layer(self.1);
// }
//}
/// Binder object in a parcel
#[repr(C)]
#[derive(Debug)]
struct FlatBinderObject {
ty: u32,
flags: u32,
inner: usize, // Can either be a void *binder or a u32 handle
cookie: usize
}
impl FlatBinderObject {
fn from_parcel(parcel: &mut ParcelReader) -> FlatBinderObject {
FlatBinderObject {
ty: parcel.read_u32(),
flags: parcel.read_u32(),
inner: parcel.read_u64() as usize,
cookie: parcel.read_u64() as usize
}
}
}
// Returned by igbp_connect
#[repr(C)]
#[derive(Debug)]
struct QueueBufferOutput {
width: u32,
height: u32,
transform_hint: u32,
num_pending_buffers: u32
}
impl QueueBufferOutput {
fn from_parcel(parcel: &mut ParcelReader) -> QueueBufferOutput {
let width = parcel.read_u32();
let height = parcel.read_u32();
let transform_hint = parcel.read_u32();
let num_pending_buffers = parcel.read_u32();
QueueBufferOutput {
width, height, transform_hint, num_pending_buffers
}
}
}
#[repr(C)]
struct GraphicBuffer<'a> {
width: u32,
height: u32,
stride: u32,
format: u32,
usage: u32,
gpu_buffer: &'a GpuBuffer,
index: u32,
offset_gpu_buffer: u32,
}
impl<'a> GraphicBuffer<'a> {
fn write_to_parcel(&self, parcel: &mut OwnedParcel) {
}
}
#[repr(C)]
struct GpuBuffer {
nvmap_handle: u32,
size: usize,
alignment: u32,
kind: u8
}
// nvmap stuff
#[repr(C, align(4096))]
struct BufferMemory([u32; 0x3c0000/4]);
impl std::ops::Deref for BufferMemory {
type Target = [u32];
fn | (&self) -> &[u32] {
&self.0[..]
}
}
impl std::ops::DerefMut for BufferMemory {
fn deref_mut(&mut self) -> &mut [u32] {
&mut self.0[..]
}
}
const NVMAP_IOC_CREATE: u32 = 0xC0080101;
const NVMAP_IOC_FROM_ID: u32 = 0xC0080103;
const NVMAP_IOC_ALLOC: u32 = 0xC0200104;
const NVMAP_IOC_FREE: u32 = 0xC0180105;
const NVMAP_IOC_PARAM: u32 = 0xC00C0109;
const NVMAP_IOC_GET_ID: u32 = 0xC008010E;
#[repr(C)]
#[derive(Debug)]
struct NvMapIocCreateArgs{
/// In, size of the buffer in bytes
size: u32,
/// Out, handle to use for other operations
handle: u32
}
#[repr(C)]
#[derive(Debug)]
struct NvMapIocAllocArgs {
handle: u32,
heapmask: u32,
/// (0=read-only, 1=read-write)
flags: u32,
align: u32,
kind: u8,
pad: [u8; 7],
addr: u64,
}
// vi stuff. I should reuse some code from rust-binder, instead of rolling my
// own again.
const REQUEST_BUFFER: u32 = 0x1;
const SET_BUFFER_COUNT: u32 = 0x2;
const DEQUEUE_BUFFER: u32 = 0x3;
const DETACH_BUFFER: u32 = 0x4;
const DETACH_NEXT_BUFFER: u32 = 0x5;
const ATTACH_BUFFER: u32 = 0x6;
const QUEUE_BUFFER: u32 = 0x7;
const CANCEL_BUFFER: u32 = 0x8;
const QUERY: u32 = 0x9;
const CONNECT: u32 = 0xA;
const DISCONNECT: u32 = 0xB;
// 0xC might be SET_SIDEBAND_STREAM but I'm not sure
const ALLOCATE_BUFFERS: u32 = 0xD;
const SET_PREALLOCATED_BUFFER: u32 = 0xE;
#[derive(Debug)]
struct OwnedParcel(Vec<u8>);
impl OwnedParcel {
pub fn new() -> OwnedParcel {
OwnedParcel(Vec::new())
}
pub fn write_u32(&mut self, data: u32) {
self.0.write_u32::<LE>(data).unwrap();
}
pub fn write_string16(&mut self, s: &str) {
let encoded_s_count = s.encode_utf16().count();
self.write_u32(encoded_s_count as u32);
for c in s.encode_utf16() {
self.0.write_u16::<LE>(c).unwrap();
}
// zero-terminated
self.0.write_u16::<LE>(0).unwrap();
// Padding
if (encoded_s_count + 1) % 2 == 1 {
self.0.write_u16::<LE>(0).unwrap();
}
}
pub fn write_interface_token(&mut self, token: &str) {
self.write_u32(0x100);
self.write_string16(token);
}
pub fn build(self) -> RawParcel {
let mut parcel = RawParcel {
data_size: self.0.len() as u32,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
};
parcel.payload[..self.0.len()].copy_from_slice(&self.0[..]);
parcel
}
}
#[derive(Debug)]
struct ParcelReader(std::io::Cursor<Vec<u8>>);
impl ParcelReader {
pub fn read_u32(&mut self) -> u32 {
self.0.read_u32::<LE>().unwrap()
}
pub fn read_u64(&mut self) -> u64 {
self.0.read_u64::<LE>().unwrap()
}
}
#[repr(C)]
struct RawParcel {
data_size: u32,
data_offset: u32,
objects_size: u32,
objects_offset: u32,
payload: [u8; 0x200]
}
impl std::fmt::Debug for RawParcel {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
let mut s = f.debug_struct("RawParcel");
s.field("data_size", &self.data_size)
.field("data_offset", &self.data_offset)
.field("objects_size", &self.objects_size)
.field("objects_offset", &self.objects_offset)
.field("payload", &&self.payload[..])
.finish()
}
}
impl Default for RawParcel {
fn default() -> RawParcel {
RawParcel {
data_size: 0x200,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
}
}
}
impl RawParcel {
pub fn as_bytes(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self as *const RawParcel as *const u8, 0x10 + self.data_size as usize) }
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self as *mut RawParcel as *mut u8, std::mem::size_of::<RawParcel>()) }
}
pub fn into_parcel_reader(self) -> ParcelReader {
ParcelReader(std::io::Cursor::new(Vec::from(&self.payload[(self.data_offset - 0x10) as usize..(self.data_offset - 0x10 + self.data_size) as usize])))
}
}
| deref | identifier_name |
main.rs | //#![no_std]
#![feature(termination_trait)]
extern crate megaton_hammer;
extern crate byteorder;
extern crate image;
extern crate math;
use megaton_hammer::ipcdefs as megaton_ipc;
use byteorder::{ReadBytesExt, WriteBytesExt, LE, ByteOrder};
use megaton_hammer::kernel::{TransferMemory, KObject, FromKObject, Event, svc};
use megaton_ipc::{nn, nns};
use std::io::{Seek, SeekFrom, Cursor};
//use image::png::PNGDecoder;
use image::bmp::BMPDecoder;
use image::{Pixel, ImageDecoder};
// TODO: This kind of sucks. And is only a problem because my IPC bindings don't
// have a concept of strings yet. We need to fix this.
pub fn u8_slice_to_i8_slice(slice: &[u8]) -> &[i8] {
unsafe { &*(slice as *const _ as *const [i8]) }
}
#[derive(Debug)]
enum MyError {
MegatonError(megaton_hammer::error::Error),
ImageError(image::ImageError),
IoctlError(u32),
ParcelError(u32)
}
impl From<image::ImageError> for MyError {
fn from(err: image::ImageError) -> MyError {
MyError::ImageError(err)
}
}
impl From<megaton_hammer::error::Error> for MyError {
fn from(err: megaton_hammer::error::Error) -> MyError {
MyError::MegatonError(err)
}
}
fn main() -> std::result::Result<(), MyError> {
// Let's get ferris to show up on my switch.
println!("Initialize NV");
let nvdrv = nns::nvdrv::INvDrvServices::new_nvdrv_a(|cb| {
println!("Create transfer memory");
let transfer_mem = TransferMemory::new(0x30000)?;
// TODO: Find a better way.
let temporary_process = unsafe { KObject::new(megaton_hammer::kernel::svc::CURRENT_PROCESS) };
let ret = cb(0x30000, &temporary_process, transfer_mem.as_ref());
unsafe { std::mem::forget(temporary_process); }
ret
})?;
println!("Open /dev/nvhost-as-gpu");
let (nvasgpu, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvhost-as-gpu"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Open /dev/nvmap");
let (nvmap, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvmap"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Initialize vi");
let vi_m = nn::visrv::sf::IManagerRootService::new()?;
println!("get_display_service");
let disp_svc = vi_m.get_display_service(1)?;
println!("get_relay_service");
let relay_svc = disp_svc.get_relay_service()?;
println!("get_system_display_service");
let system_disp_svc = disp_svc.get_system_display_service()?;
println!("get_manager_display_service");
let manager_disp_svc = disp_svc.get_manager_display_service()?;
println!("Open display");
let display_id = {
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
disp_svc.open_display(display)?
};
println!("Open a layer");
let layer_id = manager_disp_svc.create_managed_layer(1, display_id, 0)?;
let binder_id = {
let mut parcel = RawParcel::default();
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
let _window_size = disp_svc.open_layer(display, layer_id, 0, parcel.as_bytes_mut())?;
let mut reader = parcel.into_parcel_reader();
let fbo = FlatBinderObject::from_parcel(&mut reader);
let binder = fbo.inner as i32;
relay_svc.adjust_refcount(binder, 1, 0)?;
relay_svc.adjust_refcount(binder, 1, 1)?;
binder
};
// Connect to the IGBP. Take a look at the following link for reference.
// https://android.googlesource.com/platform/frameworks/native/+/e2786ea5aec3a12d948feb85ffbb535fc89c0fe6/libs/gui/IGraphicBufferProducer.cpp#297
println!("Connect to the IGBP");
let queue_buffer_output = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(0); // IProducerListener is null because we don't support it in MegatonHammer (nor in libt) yet.
parcel.write_u32(2); // API
parcel.write_u32(0); // ProducerControlledByApp.
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, CONNECT, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut reader = parcel_out.into_parcel_reader();
let qbo = QueueBufferOutput::from_parcel(&mut reader);
if reader.read_u32() != 0 {
println!("Failed to connect to igbp");
return Ok(());
}
qbo
};
println!("Allocate framebuffers");
let mut mem : Vec<BufferMemory> = Vec::with_capacity(3);
unsafe { mem.set_len(3); }
// Disables caching when talking to the gpu.
unsafe { svc::set_memory_attribute(mem.as_mut_ptr() as _, mem.len() * std::mem::size_of::<BufferMemory>(), 0x8, 0x8).expect("Failed to set memory attribute"); }
let gpu_buffer = {
let mut create = NvMapIocCreateArgs {
size: (mem.len() * std::mem::size_of::<BufferMemory>()) as u32,
handle: 0
};
println!("NVMAP_IOC_CREATE {:?} ({:?})", create, unsafe { std::mem::transmute::<&NvMapIocCreateArgs, &[u8; std::mem::size_of::<NvMapIocCreateArgs>()]>(&create) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_CREATE,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&create as *const NvMapIocCreateArgs as *const u8, std::mem::size_of::<NvMapIocCreateArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut create as *mut NvMapIocCreateArgs as *mut u8, std::mem::size_of::<NvMapIocCreateArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
GpuBuffer {
nvmap_handle: create.handle,
size: mem.len() * std::mem::size_of::<BufferMemory>(),
alignment: 0x1000,
kind: 0
}
};
let buffers = {
let mut alloc = NvMapIocAllocArgs {
handle: gpu_buffer.nvmap_handle,
heapmask: 0,
flags: 0,
align: gpu_buffer.alignment,
kind: gpu_buffer.kind,
pad: [0; 7],
addr: mem.as_mut_ptr() as u64
};
println!("NVMAP_IOC_ALLOC {:?} ({:?})", alloc, unsafe { std::mem::transmute::<&NvMapIocAllocArgs, &[u8; std::mem::size_of::<NvMapIocAllocArgs>()]>(&alloc) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_ALLOC,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&alloc as *const NvMapIocAllocArgs as *const u8, std::mem::size_of::<NvMapIocAllocArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut alloc as *mut NvMapIocAllocArgs as *mut u8, std::mem::size_of::<NvMapIocAllocArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
let mut buffers = Vec::with_capacity(3);
for i in 0..3 {
buffers.push(GraphicBuffer {
width: queue_buffer_output.width,
height: queue_buffer_output.height,
stride: queue_buffer_output.width,
format: 1, // RGBA_8888
usage: 0xb00, // TODO: Wat?
gpu_buffer: &gpu_buffer,
index: i,
offset_gpu_buffer: 0x3c0000 * i
});
}
buffers
};
println!("Tell IGBP about the buffers");
for (i, buf) in buffers.iter().enumerate() {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(i as u32); // slot
parcel.write_u32(1); // Unknown
parcel.write_u32(0x16c); // Flattened GraphicsBuffer length
parcel.write_u32(0); // Unknown
buf.write_to_parcel(&mut parcel);
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, SET_PREALLOCATED_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
}
println!("Set scaling mode");
disp_svc.set_layer_scaling_mode(2, layer_id)?;
println!("Add layer to stack");
for stack in [0x0, 0x2, 0x4, 0x5, 0xA].iter() {
manager_disp_svc.add_to_layer_stack(*stack, layer_id)?;
}
println!("Set Z layer");
system_disp_svc.set_layer_z(layer_id, 2)?;
println!("Loading image from FERRIS");
let image = BMPDecoder::new(Cursor::new(&FERRIS[..]));
println!("Getting frame");
let frame = image.into_frames()?.next().unwrap().into_buffer();
//println!("Resizing FERRIS");
//let frame = image::imageops::resize(&image.into_frames()?.next().unwrap().into_buffer(), 1280, 760, image::FilterType::Lanczos3);
let vevent = unsafe { Event::from_kobject(disp_svc.get_display_vsync_event(display_id)?) };
for _ in 0..60 {
println!("Dequeue buffer");
let slot = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(1); // Pixel format
parcel.write_u32(1280); // width
parcel.write_u32(720); // height
parcel.write_u32(0); // get_frame_timestamp
parcel.write_u32(0xb00); // usage
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, DEQUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
let mut parcel_out = parcel_out.into_parcel_reader();
let slot = parcel_out.read_u32();
// Read fence
parcel_out.0.seek(SeekFrom::Current(44));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
slot
};
// Request buffer if it hasn't been requested already.
println!("Request buffer {}", slot);
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, REQUEST_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
let non_null = parcel_out.read_u32() != 0;
if non_null {
let len = parcel_out.read_u32();
if len != 0x16c {
println!("Invalid length: {}", len);
return Ok(())
}
let unk = parcel_out.read_u32();
// TODO: Get graphicbuffer.
parcel_out.0.seek(SeekFrom::Current(0x16c));
}
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
// Blit
println!("Blit");
{
fn pdep(mask: u32, mut value: u32) -> u32 {
let mut out = 0;
for shift in 0..32 {
let bit = 1 << shift;
if mask & bit != 0 {
if value & 1 != 0 {
out |= bit
}
value >>= 1;
}
}
out
}
fn swizzle_x(v: u32) -> u32 { pdep(!0x7B4, v) }
fn swizzle_y(v: u32) -> u32 { pdep(0x7B4, v) }
let x0 = 0;
let y0 = 0;
let mut offs_x0 = swizzle_x(x0);
let mut offs_y = swizzle_y(y0);
let x_mask = swizzle_x(!0);
let y_mask = swizzle_y(!0);
let incr_y = swizzle_x(128 * 10);
let tile_height = 128;
offs_x0 += incr_y * (y0 / tile_height);
// TODO: Add clipping.
for y in 0..frame.height() {
let mut offs_x = offs_x0;
for x in 0..frame.width() {
let pixel = frame.get_pixel(x, y);
mem[slot as usize][offs_y as usize + offs_x as usize] = LE::read_u32(pixel.channels());
offs_x = offs_x.wrapping_sub(x_mask) & x_mask;
}
offs_y = offs_y.wrapping_sub(y_mask) & y_mask;
if offs_y == 0 {
offs_x0 += incr_y; // wrap into next tile row
}
}
}
// Enqueue buffer
println!("Enqueue buffer");
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot | parcel.write_u32(0x54); parcel.write_u32(0); // unknown, but always those values
parcel.write_u32(0x588bbba9); parcel.write_u32(0); // Timestamp, u64
parcel.write_u32(1); // unknown, but always those values
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // sometimes zero
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // Also seen 2
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(1); // fence?
parcel.write_u32(1);
parcel.write_u32(0xa3);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, QUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
println!("{:?}", QueueBufferOutput::from_parcel(&mut parcel_out));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
vevent.wait()?;
vevent.reset()?;
}
Ok(())
}
//static FERRIS : &'static [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
//static FERRIS: &'static [u8; 33061] = include_bytes!("../img/ferris.png");
static FERRIS: &'static [u8; 153718] = include_bytes!("../img/ferris.bmp");
// Graphic buffer stuff
//struct IGraphicBufferProducer(Arc<IHOSBinderDriver>, u32);
//
//impl IGraphicBufferProducer {
// pub fn dequeue_buffer(&self) {
//
// }
//}
//struct Display(Arc<IManagerDisplayService>, u64);
//
//impl Drop for Display {
// fn drop(&mut self) {
// self.0.close_display(self.1);
// }
//}
// TODO: Layer trait?
//struct ManagedLayer(Arc<IManagerDisplayService>, u64);
//
//impl Drop for ManagedLayer {
// fn drop(&mut self) {
// self.0.destroy_managed_layer(self.1);
// }
//}
/// Binder object in a parcel
#[repr(C)]
#[derive(Debug)]
struct FlatBinderObject {
ty: u32,
flags: u32,
inner: usize, // Can either be a void *binder or a u32 handle
cookie: usize
}
impl FlatBinderObject {
fn from_parcel(parcel: &mut ParcelReader) -> FlatBinderObject {
FlatBinderObject {
ty: parcel.read_u32(),
flags: parcel.read_u32(),
inner: parcel.read_u64() as usize,
cookie: parcel.read_u64() as usize
}
}
}
// Returned by igbp_connect
#[repr(C)]
#[derive(Debug)]
struct QueueBufferOutput {
width: u32,
height: u32,
transform_hint: u32,
num_pending_buffers: u32
}
impl QueueBufferOutput {
fn from_parcel(parcel: &mut ParcelReader) -> QueueBufferOutput {
let width = parcel.read_u32();
let height = parcel.read_u32();
let transform_hint = parcel.read_u32();
let num_pending_buffers = parcel.read_u32();
QueueBufferOutput {
width, height, transform_hint, num_pending_buffers
}
}
}
#[repr(C)]
struct GraphicBuffer<'a> {
width: u32,
height: u32,
stride: u32,
format: u32,
usage: u32,
gpu_buffer: &'a GpuBuffer,
index: u32,
offset_gpu_buffer: u32,
}
impl<'a> GraphicBuffer<'a> {
fn write_to_parcel(&self, parcel: &mut OwnedParcel) {
}
}
#[repr(C)]
struct GpuBuffer {
nvmap_handle: u32,
size: usize,
alignment: u32,
kind: u8
}
// nvmap stuff
#[repr(C, align(4096))]
struct BufferMemory([u32; 0x3c0000/4]);
impl std::ops::Deref for BufferMemory {
type Target = [u32];
fn deref(&self) -> &[u32] {
&self.0[..]
}
}
impl std::ops::DerefMut for BufferMemory {
fn deref_mut(&mut self) -> &mut [u32] {
&mut self.0[..]
}
}
const NVMAP_IOC_CREATE: u32 = 0xC0080101;
const NVMAP_IOC_FROM_ID: u32 = 0xC0080103;
const NVMAP_IOC_ALLOC: u32 = 0xC0200104;
const NVMAP_IOC_FREE: u32 = 0xC0180105;
const NVMAP_IOC_PARAM: u32 = 0xC00C0109;
const NVMAP_IOC_GET_ID: u32 = 0xC008010E;
#[repr(C)]
#[derive(Debug)]
struct NvMapIocCreateArgs{
/// In, size of the buffer in bytes
size: u32,
/// Out, handle to use for other operations
handle: u32
}
#[repr(C)]
#[derive(Debug)]
struct NvMapIocAllocArgs {
handle: u32,
heapmask: u32,
/// (0=read-only, 1=read-write)
flags: u32,
align: u32,
kind: u8,
pad: [u8; 7],
addr: u64,
}
// vi stuff. I should reuse some code from rust-binder, instead of rolling my
// own again.
const REQUEST_BUFFER: u32 = 0x1;
const SET_BUFFER_COUNT: u32 = 0x2;
const DEQUEUE_BUFFER: u32 = 0x3;
const DETACH_BUFFER: u32 = 0x4;
const DETACH_NEXT_BUFFER: u32 = 0x5;
const ATTACH_BUFFER: u32 = 0x6;
const QUEUE_BUFFER: u32 = 0x7;
const CANCEL_BUFFER: u32 = 0x8;
const QUERY: u32 = 0x9;
const CONNECT: u32 = 0xA;
const DISCONNECT: u32 = 0xB;
// 0xC might be SET_SIDEBAND_STREAM but I'm not sure
const ALLOCATE_BUFFERS: u32 = 0xD;
const SET_PREALLOCATED_BUFFER: u32 = 0xE;
#[derive(Debug)]
struct OwnedParcel(Vec<u8>);
impl OwnedParcel {
pub fn new() -> OwnedParcel {
OwnedParcel(Vec::new())
}
pub fn write_u32(&mut self, data: u32) {
self.0.write_u32::<LE>(data).unwrap();
}
pub fn write_string16(&mut self, s: &str) {
let encoded_s_count = s.encode_utf16().count();
self.write_u32(encoded_s_count as u32);
for c in s.encode_utf16() {
self.0.write_u16::<LE>(c).unwrap();
}
// zero-terminated
self.0.write_u16::<LE>(0).unwrap();
// Padding
if (encoded_s_count + 1) % 2 == 1 {
self.0.write_u16::<LE>(0).unwrap();
}
}
pub fn write_interface_token(&mut self, token: &str) {
self.write_u32(0x100);
self.write_string16(token);
}
pub fn build(self) -> RawParcel {
let mut parcel = RawParcel {
data_size: self.0.len() as u32,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
};
parcel.payload[..self.0.len()].copy_from_slice(&self.0[..]);
parcel
}
}
#[derive(Debug)]
struct ParcelReader(std::io::Cursor<Vec<u8>>);
impl ParcelReader {
pub fn read_u32(&mut self) -> u32 {
self.0.read_u32::<LE>().unwrap()
}
pub fn read_u64(&mut self) -> u64 {
self.0.read_u64::<LE>().unwrap()
}
}
#[repr(C)]
struct RawParcel {
data_size: u32,
data_offset: u32,
objects_size: u32,
objects_offset: u32,
payload: [u8; 0x200]
}
impl std::fmt::Debug for RawParcel {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
let mut s = f.debug_struct("RawParcel");
s.field("data_size", &self.data_size)
.field("data_offset", &self.data_offset)
.field("objects_size", &self.objects_size)
.field("objects_offset", &self.objects_offset)
.field("payload", &&self.payload[..])
.finish()
}
}
impl Default for RawParcel {
fn default() -> RawParcel {
RawParcel {
data_size: 0x200,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
}
}
}
impl RawParcel {
pub fn as_bytes(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self as *const RawParcel as *const u8, 0x10 + self.data_size as usize) }
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self as *mut RawParcel as *mut u8, std::mem::size_of::<RawParcel>()) }
}
pub fn into_parcel_reader(self) -> ParcelReader {
ParcelReader(std::io::Cursor::new(Vec::from(&self.payload[(self.data_offset - 0x10) as usize..(self.data_offset - 0x10 + self.data_size) as usize])))
}
} | random_line_split | |
main.rs | //#![no_std]
#![feature(termination_trait)]
extern crate megaton_hammer;
extern crate byteorder;
extern crate image;
extern crate math;
use megaton_hammer::ipcdefs as megaton_ipc;
use byteorder::{ReadBytesExt, WriteBytesExt, LE, ByteOrder};
use megaton_hammer::kernel::{TransferMemory, KObject, FromKObject, Event, svc};
use megaton_ipc::{nn, nns};
use std::io::{Seek, SeekFrom, Cursor};
//use image::png::PNGDecoder;
use image::bmp::BMPDecoder;
use image::{Pixel, ImageDecoder};
// TODO: This kind of sucks. And is only a problem because my IPC bindings don't
// have a concept of strings yet. We need to fix this.
pub fn u8_slice_to_i8_slice(slice: &[u8]) -> &[i8] {
unsafe { &*(slice as *const _ as *const [i8]) }
}
#[derive(Debug)]
enum MyError {
MegatonError(megaton_hammer::error::Error),
ImageError(image::ImageError),
IoctlError(u32),
ParcelError(u32)
}
impl From<image::ImageError> for MyError {
fn from(err: image::ImageError) -> MyError |
}
impl From<megaton_hammer::error::Error> for MyError {
fn from(err: megaton_hammer::error::Error) -> MyError {
MyError::MegatonError(err)
}
}
fn main() -> std::result::Result<(), MyError> {
// Let's get ferris to show up on my switch.
println!("Initialize NV");
let nvdrv = nns::nvdrv::INvDrvServices::new_nvdrv_a(|cb| {
println!("Create transfer memory");
let transfer_mem = TransferMemory::new(0x30000)?;
// TODO: Find a better way.
let temporary_process = unsafe { KObject::new(megaton_hammer::kernel::svc::CURRENT_PROCESS) };
let ret = cb(0x30000, &temporary_process, transfer_mem.as_ref());
unsafe { std::mem::forget(temporary_process); }
ret
})?;
println!("Open /dev/nvhost-as-gpu");
let (nvasgpu, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvhost-as-gpu"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Open /dev/nvmap");
let (nvmap, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvmap"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Initialize vi");
let vi_m = nn::visrv::sf::IManagerRootService::new()?;
println!("get_display_service");
let disp_svc = vi_m.get_display_service(1)?;
println!("get_relay_service");
let relay_svc = disp_svc.get_relay_service()?;
println!("get_system_display_service");
let system_disp_svc = disp_svc.get_system_display_service()?;
println!("get_manager_display_service");
let manager_disp_svc = disp_svc.get_manager_display_service()?;
println!("Open display");
let display_id = {
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
disp_svc.open_display(display)?
};
println!("Open a layer");
let layer_id = manager_disp_svc.create_managed_layer(1, display_id, 0)?;
let binder_id = {
let mut parcel = RawParcel::default();
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
let _window_size = disp_svc.open_layer(display, layer_id, 0, parcel.as_bytes_mut())?;
let mut reader = parcel.into_parcel_reader();
let fbo = FlatBinderObject::from_parcel(&mut reader);
let binder = fbo.inner as i32;
relay_svc.adjust_refcount(binder, 1, 0)?;
relay_svc.adjust_refcount(binder, 1, 1)?;
binder
};
// Connect to the IGBP. Take a look at the following link for reference.
// https://android.googlesource.com/platform/frameworks/native/+/e2786ea5aec3a12d948feb85ffbb535fc89c0fe6/libs/gui/IGraphicBufferProducer.cpp#297
println!("Connect to the IGBP");
let queue_buffer_output = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(0); // IProducerListener is null because we don't support it in MegatonHammer (nor in libt) yet.
parcel.write_u32(2); // API
parcel.write_u32(0); // ProducerControlledByApp.
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, CONNECT, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut reader = parcel_out.into_parcel_reader();
let qbo = QueueBufferOutput::from_parcel(&mut reader);
if reader.read_u32() != 0 {
println!("Failed to connect to igbp");
return Ok(());
}
qbo
};
println!("Allocate framebuffers");
let mut mem : Vec<BufferMemory> = Vec::with_capacity(3);
unsafe { mem.set_len(3); }
// Disables caching when talking to the gpu.
unsafe { svc::set_memory_attribute(mem.as_mut_ptr() as _, mem.len() * std::mem::size_of::<BufferMemory>(), 0x8, 0x8).expect("Failed to set memory attribute"); }
let gpu_buffer = {
let mut create = NvMapIocCreateArgs {
size: (mem.len() * std::mem::size_of::<BufferMemory>()) as u32,
handle: 0
};
println!("NVMAP_IOC_CREATE {:?} ({:?})", create, unsafe { std::mem::transmute::<&NvMapIocCreateArgs, &[u8; std::mem::size_of::<NvMapIocCreateArgs>()]>(&create) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_CREATE,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&create as *const NvMapIocCreateArgs as *const u8, std::mem::size_of::<NvMapIocCreateArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut create as *mut NvMapIocCreateArgs as *mut u8, std::mem::size_of::<NvMapIocCreateArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
GpuBuffer {
nvmap_handle: create.handle,
size: mem.len() * std::mem::size_of::<BufferMemory>(),
alignment: 0x1000,
kind: 0
}
};
let buffers = {
let mut alloc = NvMapIocAllocArgs {
handle: gpu_buffer.nvmap_handle,
heapmask: 0,
flags: 0,
align: gpu_buffer.alignment,
kind: gpu_buffer.kind,
pad: [0; 7],
addr: mem.as_mut_ptr() as u64
};
println!("NVMAP_IOC_ALLOC {:?} ({:?})", alloc, unsafe { std::mem::transmute::<&NvMapIocAllocArgs, &[u8; std::mem::size_of::<NvMapIocAllocArgs>()]>(&alloc) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_ALLOC,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&alloc as *const NvMapIocAllocArgs as *const u8, std::mem::size_of::<NvMapIocAllocArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut alloc as *mut NvMapIocAllocArgs as *mut u8, std::mem::size_of::<NvMapIocAllocArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
let mut buffers = Vec::with_capacity(3);
for i in 0..3 {
buffers.push(GraphicBuffer {
width: queue_buffer_output.width,
height: queue_buffer_output.height,
stride: queue_buffer_output.width,
format: 1, // RGBA_8888
usage: 0xb00, // TODO: Wat?
gpu_buffer: &gpu_buffer,
index: i,
offset_gpu_buffer: 0x3c0000 * i
});
}
buffers
};
println!("Tell IGBP about the buffers");
for (i, buf) in buffers.iter().enumerate() {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(i as u32); // slot
parcel.write_u32(1); // Unknown
parcel.write_u32(0x16c); // Flattened GraphicsBuffer length
parcel.write_u32(0); // Unknown
buf.write_to_parcel(&mut parcel);
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, SET_PREALLOCATED_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
}
println!("Set scaling mode");
disp_svc.set_layer_scaling_mode(2, layer_id)?;
println!("Add layer to stack");
for stack in [0x0, 0x2, 0x4, 0x5, 0xA].iter() {
manager_disp_svc.add_to_layer_stack(*stack, layer_id)?;
}
println!("Set Z layer");
system_disp_svc.set_layer_z(layer_id, 2)?;
println!("Loading image from FERRIS");
let image = BMPDecoder::new(Cursor::new(&FERRIS[..]));
println!("Getting frame");
let frame = image.into_frames()?.next().unwrap().into_buffer();
//println!("Resizing FERRIS");
//let frame = image::imageops::resize(&image.into_frames()?.next().unwrap().into_buffer(), 1280, 760, image::FilterType::Lanczos3);
let vevent = unsafe { Event::from_kobject(disp_svc.get_display_vsync_event(display_id)?) };
for _ in 0..60 {
println!("Dequeue buffer");
let slot = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(1); // Pixel format
parcel.write_u32(1280); // width
parcel.write_u32(720); // height
parcel.write_u32(0); // get_frame_timestamp
parcel.write_u32(0xb00); // usage
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, DEQUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
let mut parcel_out = parcel_out.into_parcel_reader();
let slot = parcel_out.read_u32();
// Read fence
parcel_out.0.seek(SeekFrom::Current(44));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
slot
};
// Request buffer if it hasn't been requested already.
println!("Request buffer {}", slot);
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, REQUEST_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
let non_null = parcel_out.read_u32() != 0;
if non_null {
let len = parcel_out.read_u32();
if len != 0x16c {
println!("Invalid length: {}", len);
return Ok(())
}
let unk = parcel_out.read_u32();
// TODO: Get graphicbuffer.
parcel_out.0.seek(SeekFrom::Current(0x16c));
}
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
// Blit
println!("Blit");
{
fn pdep(mask: u32, mut value: u32) -> u32 {
let mut out = 0;
for shift in 0..32 {
let bit = 1 << shift;
if mask & bit != 0 {
if value & 1 != 0 {
out |= bit
}
value >>= 1;
}
}
out
}
fn swizzle_x(v: u32) -> u32 { pdep(!0x7B4, v) }
fn swizzle_y(v: u32) -> u32 { pdep(0x7B4, v) }
let x0 = 0;
let y0 = 0;
let mut offs_x0 = swizzle_x(x0);
let mut offs_y = swizzle_y(y0);
let x_mask = swizzle_x(!0);
let y_mask = swizzle_y(!0);
let incr_y = swizzle_x(128 * 10);
let tile_height = 128;
offs_x0 += incr_y * (y0 / tile_height);
// TODO: Add clipping.
for y in 0..frame.height() {
let mut offs_x = offs_x0;
for x in 0..frame.width() {
let pixel = frame.get_pixel(x, y);
mem[slot as usize][offs_y as usize + offs_x as usize] = LE::read_u32(pixel.channels());
offs_x = offs_x.wrapping_sub(x_mask) & x_mask;
}
offs_y = offs_y.wrapping_sub(y_mask) & y_mask;
if offs_y == 0 {
offs_x0 += incr_y; // wrap into next tile row
}
}
}
// Enqueue buffer
println!("Enqueue buffer");
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
parcel.write_u32(0x54); parcel.write_u32(0); // unknown, but always those values
parcel.write_u32(0x588bbba9); parcel.write_u32(0); // Timestamp, u64
parcel.write_u32(1); // unknown, but always those values
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // sometimes zero
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // Also seen 2
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(1); // fence?
parcel.write_u32(1);
parcel.write_u32(0xa3);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, QUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
println!("{:?}", QueueBufferOutput::from_parcel(&mut parcel_out));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
vevent.wait()?;
vevent.reset()?;
}
Ok(())
}
//static FERRIS : &'static [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
//static FERRIS: &'static [u8; 33061] = include_bytes!("../img/ferris.png");
static FERRIS: &'static [u8; 153718] = include_bytes!("../img/ferris.bmp");
// Graphic buffer stuff
//struct IGraphicBufferProducer(Arc<IHOSBinderDriver>, u32);
//
//impl IGraphicBufferProducer {
// pub fn dequeue_buffer(&self) {
//
// }
//}
//struct Display(Arc<IManagerDisplayService>, u64);
//
//impl Drop for Display {
// fn drop(&mut self) {
// self.0.close_display(self.1);
// }
//}
// TODO: Layer trait?
//struct ManagedLayer(Arc<IManagerDisplayService>, u64);
//
//impl Drop for ManagedLayer {
// fn drop(&mut self) {
// self.0.destroy_managed_layer(self.1);
// }
//}
/// Binder object in a parcel
#[repr(C)]
#[derive(Debug)]
struct FlatBinderObject {
ty: u32,
flags: u32,
inner: usize, // Can either be a void *binder or a u32 handle
cookie: usize
}
impl FlatBinderObject {
fn from_parcel(parcel: &mut ParcelReader) -> FlatBinderObject {
FlatBinderObject {
ty: parcel.read_u32(),
flags: parcel.read_u32(),
inner: parcel.read_u64() as usize,
cookie: parcel.read_u64() as usize
}
}
}
// Returned by igbp_connect
#[repr(C)]
#[derive(Debug)]
struct QueueBufferOutput {
width: u32,
height: u32,
transform_hint: u32,
num_pending_buffers: u32
}
impl QueueBufferOutput {
fn from_parcel(parcel: &mut ParcelReader) -> QueueBufferOutput {
let width = parcel.read_u32();
let height = parcel.read_u32();
let transform_hint = parcel.read_u32();
let num_pending_buffers = parcel.read_u32();
QueueBufferOutput {
width, height, transform_hint, num_pending_buffers
}
}
}
#[repr(C)]
struct GraphicBuffer<'a> {
width: u32,
height: u32,
stride: u32,
format: u32,
usage: u32,
gpu_buffer: &'a GpuBuffer,
index: u32,
offset_gpu_buffer: u32,
}
impl<'a> GraphicBuffer<'a> {
fn write_to_parcel(&self, parcel: &mut OwnedParcel) {
}
}
#[repr(C)]
struct GpuBuffer {
nvmap_handle: u32,
size: usize,
alignment: u32,
kind: u8
}
// nvmap stuff
#[repr(C, align(4096))]
struct BufferMemory([u32; 0x3c0000/4]);
impl std::ops::Deref for BufferMemory {
type Target = [u32];
fn deref(&self) -> &[u32] {
&self.0[..]
}
}
impl std::ops::DerefMut for BufferMemory {
fn deref_mut(&mut self) -> &mut [u32] {
&mut self.0[..]
}
}
const NVMAP_IOC_CREATE: u32 = 0xC0080101;
const NVMAP_IOC_FROM_ID: u32 = 0xC0080103;
const NVMAP_IOC_ALLOC: u32 = 0xC0200104;
const NVMAP_IOC_FREE: u32 = 0xC0180105;
const NVMAP_IOC_PARAM: u32 = 0xC00C0109;
const NVMAP_IOC_GET_ID: u32 = 0xC008010E;
#[repr(C)]
#[derive(Debug)]
struct NvMapIocCreateArgs{
/// In, size of the buffer in bytes
size: u32,
/// Out, handle to use for other operations
handle: u32
}
#[repr(C)]
#[derive(Debug)]
struct NvMapIocAllocArgs {
handle: u32,
heapmask: u32,
/// (0=read-only, 1=read-write)
flags: u32,
align: u32,
kind: u8,
pad: [u8; 7],
addr: u64,
}
// vi stuff. I should reuse some code from rust-binder, instead of rolling my
// own again.
const REQUEST_BUFFER: u32 = 0x1;
const SET_BUFFER_COUNT: u32 = 0x2;
const DEQUEUE_BUFFER: u32 = 0x3;
const DETACH_BUFFER: u32 = 0x4;
const DETACH_NEXT_BUFFER: u32 = 0x5;
const ATTACH_BUFFER: u32 = 0x6;
const QUEUE_BUFFER: u32 = 0x7;
const CANCEL_BUFFER: u32 = 0x8;
const QUERY: u32 = 0x9;
const CONNECT: u32 = 0xA;
const DISCONNECT: u32 = 0xB;
// 0xC might be SET_SIDEBAND_STREAM but I'm not sure
const ALLOCATE_BUFFERS: u32 = 0xD;
const SET_PREALLOCATED_BUFFER: u32 = 0xE;
#[derive(Debug)]
struct OwnedParcel(Vec<u8>);
impl OwnedParcel {
pub fn new() -> OwnedParcel {
OwnedParcel(Vec::new())
}
pub fn write_u32(&mut self, data: u32) {
self.0.write_u32::<LE>(data).unwrap();
}
pub fn write_string16(&mut self, s: &str) {
let encoded_s_count = s.encode_utf16().count();
self.write_u32(encoded_s_count as u32);
for c in s.encode_utf16() {
self.0.write_u16::<LE>(c).unwrap();
}
// zero-terminated
self.0.write_u16::<LE>(0).unwrap();
// Padding
if (encoded_s_count + 1) % 2 == 1 {
self.0.write_u16::<LE>(0).unwrap();
}
}
pub fn write_interface_token(&mut self, token: &str) {
self.write_u32(0x100);
self.write_string16(token);
}
pub fn build(self) -> RawParcel {
let mut parcel = RawParcel {
data_size: self.0.len() as u32,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
};
parcel.payload[..self.0.len()].copy_from_slice(&self.0[..]);
parcel
}
}
#[derive(Debug)]
struct ParcelReader(std::io::Cursor<Vec<u8>>);
impl ParcelReader {
pub fn read_u32(&mut self) -> u32 {
self.0.read_u32::<LE>().unwrap()
}
pub fn read_u64(&mut self) -> u64 {
self.0.read_u64::<LE>().unwrap()
}
}
#[repr(C)]
struct RawParcel {
data_size: u32,
data_offset: u32,
objects_size: u32,
objects_offset: u32,
payload: [u8; 0x200]
}
impl std::fmt::Debug for RawParcel {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
let mut s = f.debug_struct("RawParcel");
s.field("data_size", &self.data_size)
.field("data_offset", &self.data_offset)
.field("objects_size", &self.objects_size)
.field("objects_offset", &self.objects_offset)
.field("payload", &&self.payload[..])
.finish()
}
}
impl Default for RawParcel {
fn default() -> RawParcel {
RawParcel {
data_size: 0x200,
data_offset: 0x10,
objects_size: 0,
objects_offset: 0,
payload: [0; 0x200]
}
}
}
impl RawParcel {
pub fn as_bytes(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self as *const RawParcel as *const u8, 0x10 + self.data_size as usize) }
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self as *mut RawParcel as *mut u8, std::mem::size_of::<RawParcel>()) }
}
pub fn into_parcel_reader(self) -> ParcelReader {
ParcelReader(std::io::Cursor::new(Vec::from(&self.payload[(self.data_offset - 0x10) as usize..(self.data_offset - 0x10 + self.data_size) as usize])))
}
}
| {
MyError::ImageError(err)
} | identifier_body |
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses buffers to produce requests, sends
//! the requests to the storage worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser, | _storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.read_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
} | poll: Poll,
timeout: Duration, | random_line_split |
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses buffers to produce requests, sends
//! the requests to the storage worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() |
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.read_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
}
| {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
} | conditional_block |
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses buffers to produce requests, sends
//! the requests to the storage worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> |
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.read_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
}
| {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
} | identifier_body |
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses buffers to produce requests, sends
//! the requests to the storage worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.read_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn | (&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
}
| handle_new_sessions | identifier_name |
scanner.go | package ipv4
import (
"code.google.com/p/go-uuid/uuid"
"fmt"
"github.com/thejerf/suture"
log "gopkg.in/inconshreveable/log15.v2"
logext "gopkg.in/inconshreveable/log15.v2/ext"
"net"
"strconv"
"sync"
"time"
)
const (
numIPv4Checkers = 100
)
var (
ignoredIPv4Interfaces = map[string]struct{}{
// DARWIN
"lo0": struct{}{}, // loopback
"tun0": struct{}{}, // tunnel (VPN)
// TODO: Windows ignored?
}
)
// An IScanner searches the IPv4 network for services matching given
// descriptions. Once services are found, they are locked and returned to the
// caller. It is up to the caller to unlock IPs (via Unlock()) if they are no
// longer in use.
type IScanner interface {
// Add a description to search for; return the ID used if a match is returned
AddDescription(desc ServiceDescription) string
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
Scan() map[string][]string
// By default, after an IP is found with Scan it is ignored in future searches.
// Unlock instructs the scanner to include responses for that IP address in future scans.
Unlock(ip net.IP)
}
// A ServiceFoundNotification indicates that a service was found at ip IP which
// matched all of MatchingDescriptionIDs.
type ServiceFoundNotification struct {
IP net.IP
MatchingDescriptionIDs []string
}
// Scanner implements IScanner. It searches the IPv4 network for services
// matching given descriptions. Once services are found, they are locked and
// returned to the caller. It is up to the caller to unlock IPs (via Unlock())
// if they are no longer in use.
type Scanner struct {
interfaces map[string]net.Interface
ilock sync.RWMutex // protects interfaces
descriptionsByID map[string]ServiceDescription
dlock sync.RWMutex // protects descriptionsByID
activeServicesByIP map[string]struct{}
slock *sync.RWMutex // protects activeServicesByIP
log log.Logger
}
// NewScanner properly instantiates a Scanner.
func NewScanner() *Scanner {
s := &Scanner{
interfaces: make(map[string]net.Interface),
ilock: sync.RWMutex{},
descriptionsByID: make(map[string]ServiceDescription),
dlock: sync.RWMutex{},
activeServicesByIP: make(map[string]struct{}),
slock: &sync.RWMutex{},
log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
}
err := s.refreshInterfaces()
if err != nil {
panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
}
return s
}
// AddDescription adds a ServiceDescription to the Scanner. On following scans,
// the Scanner will find services which match the description.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
s.dlock.Lock()
defer s.dlock.Unlock()
id := uuid.New()
s.descriptionsByID[id] = desc
return id
}
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
func (s *Scanner) Scan() map[string][]string |
// Unlock unlocks the provided IP, such that it will no longer be ignored in
// future scans.
func (s *Scanner) Unlock(ip net.IP) {
s.slock.Lock()
defer s.slock.Unlock()
delete(s.activeServicesByIP, ip.String())
s.log.Debug("ipv4 scanner unlocked IP", "ip", ip.String())
}
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
var matchedDrivers []string
matchedPorts := make(map[uint16]bool) // true if found open, false if not, nil key if untested
s.dlock.RLock()
defer s.dlock.RUnlock()
for id, desc := range s.descriptionsByID {
match := true
for _, port := range desc.OpenPorts {
// Try the cache first
if portIsOpen, ok := matchedPorts[port]; ok {
if !portIsOpen {
match = false
s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
break
}
} else {
// No cached entry, try dialing
timeout := 1 * time.Second
url := ip.String() + ":" + strconv.Itoa(int(port))
conn, err := net.DialTimeout("tcp", url, timeout)
if err != nil {
match = false
matchedPorts[port] = false
s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
break
}
conn.Close()
}
}
if match {
matchedDrivers = append(matchedDrivers, id) // add
s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
}
}
return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network interfaces
func (s *Scanner) refreshInterfaces() error {
s.ilock.Lock() // Lock the interfaces for writing
defer s.ilock.Unlock()
ifaces, err := net.Interfaces()
if err != nil {
return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
}
foundInterfaces := make(map[string]net.Interface)
newInterfaces := make(map[string]struct{})
for _, iface := range ifaces {
name := iface.Name
if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
foundInterfaces[name] = iface
if _, ok := s.interfaces[name]; !ok {
newInterfaces[name] = struct{}{}
}
delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
}
}
if len(s.interfaces) > 0 {
names := make([]string, len(s.interfaces))
i := 0
for name := range s.interfaces {
names[i] = name
}
s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
}
s.interfaces = foundInterfaces
if len(newInterfaces) > 0 {
names := make([]string, len(newInterfaces))
i := 0
for name := range newInterfaces {
names[i] = name
i++
}
s.log.Debug("new IPv4 interfaces found", "interfaces", names)
}
return nil
}
// incrementIP increments an IPv4 address
func incrementIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
// An IContinuousScanner is a Scanner that scans continuously. Scan results are
// passed to the channel provided by FoundServices()
type IContinuousScanner interface {
suture.Service
IScanner
FoundServices() chan ServiceFoundNotification
}
// ContinuousScanner implements IContinuousScanner. It scans continuously,
// putting results into the channel provided by FoundServices().
type ContinuousScanner struct {
*Scanner
foundIPChan chan ServiceFoundNotification
period time.Duration
stop chan struct{}
}
// NewContinousScanner properly instantiates a ContinuousScanner. The new
// Scanner will wait between scans for a time defined by `period`.
func NewContinousScanner(period time.Duration) *ContinuousScanner {
return &ContinuousScanner{
Scanner: NewScanner(),
foundIPChan: make(chan ServiceFoundNotification),
period: period,
stop: make(chan struct{}),
}
}
// FoundServices returns a channel which will be populated with services found
// by the ContinuousScanner
func (s *ContinuousScanner) FoundServices() chan ServiceFoundNotification {
return s.foundIPChan
}
// Serve begins serving the ContinuousScanner.
func (s *ContinuousScanner) Serve() {
s.log.Debug("starting continuous ipv4 scanner", "period", s.period)
timer := time.NewTimer(time.Hour)
for {
// Perform a scan
s.log.Debug("doing ipv4 scan")
for ip, serviceIDs := range s.Scan() {
s.log.Debug("found ipv4 scan", "ip", net.ParseIP(ip), "descriptions", serviceIDs)
s.foundIPChan <- ServiceFoundNotification{
IP: net.ParseIP(ip),
MatchingDescriptionIDs: serviceIDs,
}
}
// Wait for s.period
s.log.Debug("waiting", "duration", s.period)
timer.Reset(s.period)
select {
case <-s.stop:
return
case <-timer.C:
}
}
}
// Stop stops the ContinousScanner
func (s *ContinuousScanner) Stop() {
s.stop <- struct{}{}
}
| {
if len(s.descriptionsByID) == 0 {
s.log.Debug("scanner has no descriptions, ignoring scan")
return map[string][]string{}
}
s.log.Debug("ip4v scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
foundServices := make(map[string][]string)
flock := sync.Mutex{} // protects foundServices
var wg sync.WaitGroup
var numChecked, numFound, numAlreadyInUse int
s.ilock.RLock()
defer s.ilock.RUnlock()
for name, intf := range s.interfaces {
addrs, err := intf.Addrs()
if err != nil {
panic("could not get addresses from " + name + ": " + err.Error())
}
s.log.Debug("ip4v scanner scanning interface", "interface", name, "num_addrs", len(addrs))
for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shoudln't happen?", "interface", intf.Name, "*net.IPAddr", v)
case *net.IPNet:
if v.IP.DefaultMask() != nil { // ignore IPs without default mask (IPv6?)
ip := v.IP
for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
wg.Add(1)
numChecked++
// To save space, try and only use 4 bytes
if x := ip.To4(); x != nil {
ip = x
}
dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
copy(dup, ip)
go func() {
defer wg.Done()
s.slock.RLock()
_, ok := s.activeServicesByIP[dup.String()] // ignore IPs already in use
s.slock.RUnlock()
if ok { // ignore IPs already in use
s.log.Debug("scanner ignoring IP that is already in use", "ip", dup.String())
numAlreadyInUse++
} else {
ids := s.getMatchingDescriptions(dup)
if len(ids) > 0 { // At least one service matches
s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
numFound++
flock.Lock()
foundServices[dup.String()] = ids
flock.Unlock()
s.slock.Lock()
s.activeServicesByIP[dup.String()] = struct{}{} // mark IP as in use
s.slock.Unlock()
}
}
}()
}
}
default:
s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
}
}
}
s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
wg.Wait()
s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
return foundServices
} | identifier_body |
scanner.go | package ipv4
import (
"code.google.com/p/go-uuid/uuid"
"fmt"
"github.com/thejerf/suture"
log "gopkg.in/inconshreveable/log15.v2"
logext "gopkg.in/inconshreveable/log15.v2/ext"
"net"
"strconv"
"sync"
"time"
)
const (
numIPv4Checkers = 100
)
var (
ignoredIPv4Interfaces = map[string]struct{}{
// DARWIN
"lo0": struct{}{}, // loopback
"tun0": struct{}{}, // tunnel (VPN)
// TODO: Windows ignored?
}
)
// An IScanner searches the IPv4 network for services matching given
// descriptions. Once services are found, they are locked and returned to the
// caller. It is up to the caller to unlock IPs (via Unlock()) if they are no
// longer in use.
type IScanner interface {
// Add a description to search for; return the ID used if a match is returned
AddDescription(desc ServiceDescription) string
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
Scan() map[string][]string
// By default, after an IP is found with Scan it is ignored in future searches.
// Unlock instructs the scanner to include responses for that IP address in future scans.
Unlock(ip net.IP)
}
// A ServiceFoundNotification indicates that a service was found at ip IP which
// matched all of MatchingDescriptionIDs.
type ServiceFoundNotification struct {
IP net.IP
MatchingDescriptionIDs []string
}
// Scanner implements IScanner. It searches the IPv4 network for services
// matching given descriptions. Once services are found, they are locked and
// returned to the caller. It is up to the caller to unlock IPs (via Unlock())
// if they are no longer in use.
type Scanner struct {
interfaces map[string]net.Interface
ilock sync.RWMutex // protects interfaces
descriptionsByID map[string]ServiceDescription
dlock sync.RWMutex // protects descriptionsByID
activeServicesByIP map[string]struct{}
slock *sync.RWMutex // protects activeServicesByIP
log log.Logger
}
// NewScanner properly instantiates a Scanner.
func NewScanner() *Scanner {
s := &Scanner{
interfaces: make(map[string]net.Interface),
ilock: sync.RWMutex{},
descriptionsByID: make(map[string]ServiceDescription),
dlock: sync.RWMutex{},
activeServicesByIP: make(map[string]struct{}),
slock: &sync.RWMutex{},
log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
}
err := s.refreshInterfaces()
if err != nil {
panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
}
return s
}
// AddDescription adds a ServiceDescription to the Scanner. On following scans,
// the Scanner will find services which match the description.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
s.dlock.Lock()
defer s.dlock.Unlock()
id := uuid.New()
s.descriptionsByID[id] = desc
return id
}
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
func (s *Scanner) Scan() map[string][]string {
if len(s.descriptionsByID) == 0 {
s.log.Debug("scanner has no descriptions, ignoring scan")
return map[string][]string{}
}
s.log.Debug("ip4v scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
foundServices := make(map[string][]string)
flock := sync.Mutex{} // protects foundServices
var wg sync.WaitGroup
var numChecked, numFound, numAlreadyInUse int
s.ilock.RLock()
defer s.ilock.RUnlock()
for name, intf := range s.interfaces {
addrs, err := intf.Addrs()
if err != nil {
panic("could not get addresses from " + name + ": " + err.Error())
}
s.log.Debug("ip4v scanner scanning interface", "interface", name, "num_addrs", len(addrs))
for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shoudln't happen?", "interface", intf.Name, "*net.IPAddr", v)
case *net.IPNet:
if v.IP.DefaultMask() != nil { // ignore IPs without default mask (IPv6?)
ip := v.IP
for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
wg.Add(1)
numChecked++
// To save space, try and only use 4 bytes
if x := ip.To4(); x != nil {
ip = x
}
dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
copy(dup, ip)
go func() {
defer wg.Done()
s.slock.RLock()
_, ok := s.activeServicesByIP[dup.String()] // ignore IPs already in use
s.slock.RUnlock()
if ok { // ignore IPs already in use
s.log.Debug("scanner ignoring IP that is already in use", "ip", dup.String())
numAlreadyInUse++
} else {
ids := s.getMatchingDescriptions(dup)
if len(ids) > 0 { // At least one service matches
s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
numFound++
flock.Lock()
foundServices[dup.String()] = ids
flock.Unlock()
s.slock.Lock()
s.activeServicesByIP[dup.String()] = struct{}{} // mark IP as in use
s.slock.Unlock()
}
}
}()
}
}
default:
s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
}
}
}
s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
wg.Wait()
s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
return foundServices
}
// Unlock unlocks the provided IP, such that it will no longer be ignored in
// future scans.
func (s *Scanner) Unlock(ip net.IP) {
s.slock.Lock()
defer s.slock.Unlock()
delete(s.activeServicesByIP, ip.String())
s.log.Debug("ipv4 scanner unlocked IP", "ip", ip.String())
}
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
var matchedDrivers []string
matchedPorts := make(map[uint16]bool) // true if found open, false if not, nil key if untested
s.dlock.RLock()
defer s.dlock.RUnlock()
for id, desc := range s.descriptionsByID {
match := true
for _, port := range desc.OpenPorts {
// Try the cache first
if portIsOpen, ok := matchedPorts[port]; ok {
if !portIsOpen {
match = false
s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
break
}
} else {
// No cached entry, try dialing
timeout := 1 * time.Second
url := ip.String() + ":" + strconv.Itoa(int(port))
conn, err := net.DialTimeout("tcp", url, timeout)
if err != nil {
match = false
matchedPorts[port] = false
s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
break
}
conn.Close()
}
}
if match {
matchedDrivers = append(matchedDrivers, id) // add
s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
}
}
return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network interfaces
func (s *Scanner) refreshInterfaces() error {
s.ilock.Lock() // Lock the interfaces for writing
defer s.ilock.Unlock()
ifaces, err := net.Interfaces()
if err != nil {
return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
}
foundInterfaces := make(map[string]net.Interface)
newInterfaces := make(map[string]struct{})
for _, iface := range ifaces {
name := iface.Name
if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
foundInterfaces[name] = iface
if _, ok := s.interfaces[name]; !ok {
newInterfaces[name] = struct{}{}
}
delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
}
}
if len(s.interfaces) > 0 |
s.interfaces = foundInterfaces
if len(newInterfaces) > 0 {
names := make([]string, len(newInterfaces))
i := 0
for name := range newInterfaces {
names[i] = name
i++
}
s.log.Debug("new IPv4 interfaces found", "interfaces", names)
}
return nil
}
// incrementIP increments an IPv4 address
func incrementIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
// An IContinuousScanner is a Scanner that scans continuously. Scan results are
// passed to the channel provided by FoundServices()
type IContinuousScanner interface {
suture.Service
IScanner
FoundServices() chan ServiceFoundNotification
}
// ContinuousScanner implements IContinuousScanner. It scans continuously,
// putting results into the channel provided by FoundServices().
type ContinuousScanner struct {
*Scanner
foundIPChan chan ServiceFoundNotification
period time.Duration
stop chan struct{}
}
// NewContinousScanner properly instantiates a ContinuousScanner. The new
// Scanner will wait between scans for a time defined by `period`.
func NewContinousScanner(period time.Duration) *ContinuousScanner {
return &ContinuousScanner{
Scanner: NewScanner(),
foundIPChan: make(chan ServiceFoundNotification),
period: period,
stop: make(chan struct{}),
}
}
// FoundServices returns a channel which will be populated with services found
// by the ContinuousScanner
func (s *ContinuousScanner) FoundServices() chan ServiceFoundNotification {
return s.foundIPChan
}
// Serve begins serving the ContinuousScanner.
func (s *ContinuousScanner) Serve() {
s.log.Debug("starting continuous ipv4 scanner", "period", s.period)
timer := time.NewTimer(time.Hour)
for {
// Perform a scan
s.log.Debug("doing ipv4 scan")
for ip, serviceIDs := range s.Scan() {
s.log.Debug("found ipv4 scan", "ip", net.ParseIP(ip), "descriptions", serviceIDs)
s.foundIPChan <- ServiceFoundNotification{
IP: net.ParseIP(ip),
MatchingDescriptionIDs: serviceIDs,
}
}
// Wait for s.period
s.log.Debug("waiting", "duration", s.period)
timer.Reset(s.period)
select {
case <-s.stop:
return
case <-timer.C:
}
}
}
// Stop stops the ContinousScanner
func (s *ContinuousScanner) Stop() {
s.stop <- struct{}{}
}
| {
names := make([]string, len(s.interfaces))
i := 0
for name := range s.interfaces {
names[i] = name
}
s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
} | conditional_block |
scanner.go | package ipv4
import (
"code.google.com/p/go-uuid/uuid"
"fmt"
"github.com/thejerf/suture"
log "gopkg.in/inconshreveable/log15.v2"
logext "gopkg.in/inconshreveable/log15.v2/ext"
"net"
"strconv"
"sync"
"time"
)
const (
numIPv4Checkers = 100
)
var (
ignoredIPv4Interfaces = map[string]struct{}{
// DARWIN
"lo0": struct{}{}, // loopback
"tun0": struct{}{}, // tunnel (VPN)
// TODO: Windows ignored?
}
)
// An IScanner searches the IPv4 network for services matching given
// descriptions. Once services are found, they are locked and returned to the
// caller. It is up to the caller to unlock IPs (via Unlock()) if they are no
// longer in use.
type IScanner interface {
// Add a description to search for; return the ID used if a match is returned
AddDescription(desc ServiceDescription) string
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
Scan() map[string][]string
// By default, after an IP is found with Scan it is ignored in future searches.
// Unlock instructs the scanner to include responses for that IP address in future scans.
Unlock(ip net.IP)
}
// A ServiceFoundNotification indicates that a service was found at ip IP which
// matched all of MatchingDescriptionIDs.
type ServiceFoundNotification struct {
IP net.IP
MatchingDescriptionIDs []string
}
// Scanner implements IScanner. It searches the IPv4 network for services
// matching given descriptions. Once services are found, they are locked and
// returned to the caller. It is up to the caller to unlock IPs (via Unlock())
// if they are no longer in use.
type Scanner struct {
interfaces map[string]net.Interface
ilock sync.RWMutex // protects interfaces
descriptionsByID map[string]ServiceDescription
dlock sync.RWMutex // protects descriptionsByID
activeServicesByIP map[string]struct{}
slock *sync.RWMutex // protects activeServicesByIP
log log.Logger
}
// NewScanner properly instantiates a Scanner.
func NewScanner() *Scanner {
s := &Scanner{
interfaces: make(map[string]net.Interface),
ilock: sync.RWMutex{},
descriptionsByID: make(map[string]ServiceDescription),
dlock: sync.RWMutex{},
activeServicesByIP: make(map[string]struct{}),
slock: &sync.RWMutex{},
log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
}
err := s.refreshInterfaces()
if err != nil {
panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
}
return s
}
// AddDescription adds a ServiceDescription to the Scanner. On following scans,
// the Scanner will find services which match the description.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
s.dlock.Lock()
defer s.dlock.Unlock()
id := uuid.New()
s.descriptionsByID[id] = desc
return id
}
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
func (s *Scanner) Scan() map[string][]string {
if len(s.descriptionsByID) == 0 {
s.log.Debug("scanner has no descriptions, ignoring scan")
return map[string][]string{}
}
s.log.Debug("ip4v scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
foundServices := make(map[string][]string)
flock := sync.Mutex{} // protects foundServices
var wg sync.WaitGroup
var numChecked, numFound, numAlreadyInUse int
s.ilock.RLock()
defer s.ilock.RUnlock()
for name, intf := range s.interfaces {
addrs, err := intf.Addrs()
if err != nil {
panic("could not get addresses from " + name + ": " + err.Error())
}
s.log.Debug("ip4v scanner scanning interface", "interface", name, "num_addrs", len(addrs))
for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shoudln't happen?", "interface", intf.Name, "*net.IPAddr", v)
case *net.IPNet:
if v.IP.DefaultMask() != nil { // ignore IPs without default mask (IPv6?)
ip := v.IP
for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
wg.Add(1)
numChecked++
// To save space, try and only use 4 bytes
if x := ip.To4(); x != nil {
ip = x
}
dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
copy(dup, ip)
go func() {
defer wg.Done()
s.slock.RLock()
_, ok := s.activeServicesByIP[dup.String()] // ignore IPs already in use
s.slock.RUnlock()
if ok { // ignore IPs already in use
s.log.Debug("scanner ignoring IP that is already in use", "ip", dup.String())
numAlreadyInUse++
} else {
ids := s.getMatchingDescriptions(dup)
if len(ids) > 0 { // At least one service matches
s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
numFound++
flock.Lock()
foundServices[dup.String()] = ids
flock.Unlock()
s.slock.Lock()
s.activeServicesByIP[dup.String()] = struct{}{} // mark IP as in use
s.slock.Unlock()
}
}
}()
}
}
default:
s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
}
}
}
s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
wg.Wait()
s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
return foundServices
}
// Unlock unlocks the provided IP, such that it will no longer be ignored in
// future scans.
func (s *Scanner) Unlock(ip net.IP) {
s.slock.Lock()
defer s.slock.Unlock()
delete(s.activeServicesByIP, ip.String())
s.log.Debug("ipv4 scanner unlocked IP", "ip", ip.String())
}
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
var matchedDrivers []string
matchedPorts := make(map[uint16]bool) // true if found open, false if not, nil key if untested
s.dlock.RLock()
defer s.dlock.RUnlock()
for id, desc := range s.descriptionsByID {
match := true
for _, port := range desc.OpenPorts {
// Try the cache first
if portIsOpen, ok := matchedPorts[port]; ok {
if !portIsOpen {
match = false
s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
break
}
} else {
// No cached entry, try dialing
timeout := 1 * time.Second
url := ip.String() + ":" + strconv.Itoa(int(port))
conn, err := net.DialTimeout("tcp", url, timeout)
if err != nil {
match = false
matchedPorts[port] = false
s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
break
}
conn.Close()
}
}
if match {
matchedDrivers = append(matchedDrivers, id) // add
s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
}
}
return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network
// interfaces and replaces s.interfaces with the current set.
// Interfaces listed in ignoredIPv4Interfaces (loopback, tunnels) are
// skipped. Returns an error only if the OS interface list cannot be read.
func (s *Scanner) refreshInterfaces() error {
	s.ilock.Lock() // Lock the interfaces for writing
	defer s.ilock.Unlock()
	ifaces, err := net.Interfaces()
	if err != nil {
		return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
	}
	foundInterfaces := make(map[string]net.Interface)
	newInterfaces := make(map[string]struct{})
	for _, iface := range ifaces {
		name := iface.Name
		if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
			foundInterfaces[name] = iface
			if _, ok := s.interfaces[name]; !ok {
				newInterfaces[name] = struct{}{}
			}
			delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
		}
	}
	if len(s.interfaces) > 0 {
		// BUG FIX: the original filled names[i] without ever incrementing i,
		// so only the first disappeared interface was ever logged (the rest
		// of the slice stayed ""). append sidesteps the index bookkeeping.
		names := make([]string, 0, len(s.interfaces))
		for name := range s.interfaces {
			names = append(names, name)
		}
		s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
	}
	s.interfaces = foundInterfaces
	if len(newInterfaces) > 0 {
		names := make([]string, 0, len(newInterfaces))
		for name := range newInterfaces {
			names = append(names, name)
		}
		s.log.Debug("new IPv4 interfaces found", "interfaces", names)
	}
	return nil
}
// incrementIP increments an IPv4 address | break
}
}
}
// An IContinuousScanner is a Scanner that scans continuously. Scan results are
// passed to the channel provided by FoundServices()
type IContinuousScanner interface {
suture.Service
IScanner
FoundServices() chan ServiceFoundNotification
}
// ContinuousScanner implements IContinuousScanner. It scans continuously,
// putting results into the channel provided by FoundServices().
type ContinuousScanner struct {
*Scanner
foundIPChan chan ServiceFoundNotification
period time.Duration
stop chan struct{}
}
// NewContinousScanner properly instantiates a ContinuousScanner. The new
// Scanner will wait between scans for a time defined by `period`.
func NewContinousScanner(period time.Duration) *ContinuousScanner {
return &ContinuousScanner{
Scanner: NewScanner(),
foundIPChan: make(chan ServiceFoundNotification),
period: period,
stop: make(chan struct{}),
}
}
// FoundServices returns a channel which will be populated with services found
// by the ContinuousScanner
func (s *ContinuousScanner) FoundServices() chan ServiceFoundNotification {
return s.foundIPChan
}
// Serve begins serving the ContinuousScanner.
func (s *ContinuousScanner) Serve() {
s.log.Debug("starting continuous ipv4 scanner", "period", s.period)
timer := time.NewTimer(time.Hour)
for {
// Perform a scan
s.log.Debug("doing ipv4 scan")
for ip, serviceIDs := range s.Scan() {
s.log.Debug("found ipv4 scan", "ip", net.ParseIP(ip), "descriptions", serviceIDs)
s.foundIPChan <- ServiceFoundNotification{
IP: net.ParseIP(ip),
MatchingDescriptionIDs: serviceIDs,
}
}
// Wait for s.period
s.log.Debug("waiting", "duration", s.period)
timer.Reset(s.period)
select {
case <-s.stop:
return
case <-timer.C:
}
}
}
// Stop stops the ContinousScanner
func (s *ContinuousScanner) Stop() {
s.stop <- struct{}{}
} | func incrementIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 { | random_line_split |
scanner.go | package ipv4
import (
"code.google.com/p/go-uuid/uuid"
"fmt"
"github.com/thejerf/suture"
log "gopkg.in/inconshreveable/log15.v2"
logext "gopkg.in/inconshreveable/log15.v2/ext"
"net"
"strconv"
"sync"
"time"
)
const (
numIPv4Checkers = 100
)
var (
ignoredIPv4Interfaces = map[string]struct{}{
// DARWIN
"lo0": struct{}{}, // loopback
"tun0": struct{}{}, // tunnel (VPN)
// TODO: Windows ignored?
}
)
// An IScanner searches the IPv4 network for services matching given
// descriptions. Once services are found, they are locked and returned to the
// caller. It is up to the caller to unlock IPs (via Unlock()) if they are no
// longer in use.
type IScanner interface {
// Add a description to search for; return the ID used if a match is returned
AddDescription(desc ServiceDescription) string
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
Scan() map[string][]string
// By default, after an IP is found with Scan it is ignored in future searches.
// Unlock instructs the scanner to include responses for that IP address in future scans.
Unlock(ip net.IP)
}
// A ServiceFoundNotification indicates that a service was found at ip IP which
// matched all of MatchingDescriptionIDs.
type ServiceFoundNotification struct {
IP net.IP
MatchingDescriptionIDs []string
}
// Scanner implements IScanner. It searches the IPv4 network for services
// matching given descriptions. Once services are found, they are locked and
// returned to the caller. It is up to the caller to unlock IPs (via Unlock())
// if they are no longer in use.
type Scanner struct {
interfaces map[string]net.Interface
ilock sync.RWMutex // protects interfaces
descriptionsByID map[string]ServiceDescription
dlock sync.RWMutex // protects descriptionsByID
activeServicesByIP map[string]struct{}
slock *sync.RWMutex // protects activeServicesByIP
log log.Logger
}
// NewScanner properly instantiates a Scanner.
// It initializes every map and lock, creates a tagged logger, and seeds the
// interface map via refreshInterfaces; it panics if the OS interface list
// cannot be read, since a scanner with no interfaces is unusable.
func NewScanner() *Scanner {
	s := &Scanner{
		interfaces:       make(map[string]net.Interface),
		ilock:            sync.RWMutex{},
		descriptionsByID: make(map[string]ServiceDescription),
		dlock:            sync.RWMutex{},
		activeServicesByIP: make(map[string]struct{}),
		// NOTE(review): slock is a *pointer* to an RWMutex while ilock and
		// dlock are values — looks unintentional; confirm before unifying.
		slock: &sync.RWMutex{},
		log:   Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
	}
	err := s.refreshInterfaces()
	if err != nil {
		panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
	}
	return s
}
// AddDescription registers desc with the Scanner and returns the ID under
// which any future Scan matches for this description will be reported.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
	id := uuid.New()
	s.dlock.Lock()
	s.descriptionsByID[id] = desc
	s.dlock.Unlock()
	return id
}
// Scan probes every address on the scanner's known IPv4 networks for
// services matching the registered descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those
// descriptions which matched. Matched IPs are marked in-use and skipped by
// later scans until Unlock()ed. One goroutine is spawned per candidate IP;
// the call blocks until all of them finish.
func (s *Scanner) Scan() map[string][]string {
	// Read the description count under dlock; AddDescription may run
	// concurrently with Scan.
	s.dlock.RLock()
	numDescriptions := len(s.descriptionsByID)
	s.dlock.RUnlock()
	if numDescriptions == 0 {
		s.log.Debug("scanner has no descriptions, ignoring scan")
		return map[string][]string{}
	}
	s.log.Debug("ip4v scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
	foundServices := make(map[string][]string)
	// flock protects foundServices AND the numFound/numAlreadyInUse counters,
	// all of which are written concurrently by the per-IP goroutines below.
	// (BUG FIX: the counters were previously incremented without any
	// synchronization — a data race.)
	flock := sync.Mutex{}
	var wg sync.WaitGroup
	var numChecked, numFound, numAlreadyInUse int
	s.ilock.RLock()
	defer s.ilock.RUnlock()
	for name, intf := range s.interfaces {
		addrs, err := intf.Addrs()
		if err != nil {
			panic("could not get addresses from " + name + ": " + err.Error())
		}
		s.log.Debug("ip4v scanner scanning interface", "interface", name, "num_addrs", len(addrs))
		for _, a := range addrs {
			switch v := a.(type) {
			case *net.IPAddr:
				s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shoudln't happen?", "interface", intf.Name, "*net.IPAddr", v)
			case *net.IPNet:
				if v.IP.DefaultMask() == nil {
					continue // ignore IPs without default mask (IPv6?)
				}
				ip := v.IP
				for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
					wg.Add(1)
					numChecked++ // only written here, before wg.Wait — no race
					// To save space, try and only use 4 bytes
					if x := ip.To4(); x != nil {
						ip = x
					}
					dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
					copy(dup, ip)
					go func() {
						defer wg.Done()
						key := dup.String()
						s.slock.RLock()
						_, inUse := s.activeServicesByIP[key] // ignore IPs already in use
						s.slock.RUnlock()
						if inUse {
							s.log.Debug("scanner ignoring IP that is already in use", "ip", key)
							flock.Lock()
							numAlreadyInUse++
							flock.Unlock()
							return
						}
						ids := s.getMatchingDescriptions(dup)
						if len(ids) == 0 {
							return // no description matched this IP
						}
						s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
						flock.Lock()
						foundServices[key] = ids
						numFound++
						flock.Unlock()
						s.slock.Lock()
						s.activeServicesByIP[key] = struct{}{} // mark IP as in use
						s.slock.Unlock()
					}()
				}
			default:
				s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
			}
		}
	}
	s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
	wg.Wait()
	s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
	s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
	return foundServices
}
// Unlock removes ip from the set of in-use addresses so that future scans
// will consider it again.
func (s *Scanner) Unlock(ip net.IP) {
	key := ip.String()
	s.slock.Lock()
	delete(s.activeServicesByIP, key)
	s.slock.Unlock()
	s.log.Debug("ipv4 scanner unlocked IP", "ip", key)
}
// getMatchingDescriptions returns the IDs of every registered
// ServiceDescription whose required OpenPorts are all reachable at ip
// (a TCP dial to each port succeeds within 1s).
// Per-port probe results are cached in matchedPorts for the duration of the
// call so a port shared by several descriptions is only dialed once.
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
	var matchedDrivers []string
	matchedPorts := make(map[uint16]bool) // true if found open, false if closed; key absent if untested
	s.dlock.RLock()
	defer s.dlock.RUnlock()
	for id, desc := range s.descriptionsByID {
		match := true
		for _, port := range desc.OpenPorts {
			// Try the cache first
			if portIsOpen, ok := matchedPorts[port]; ok {
				if !portIsOpen {
					match = false
					s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
					break
				}
			} else {
				// No cached entry, try dialing
				timeout := 1 * time.Second
				url := ip.String() + ":" + strconv.Itoa(int(port))
				conn, err := net.DialTimeout("tcp", url, timeout)
				if err != nil {
					match = false
					matchedPorts[port] = false
					s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
					break
				}
				conn.Close()
				// BUG FIX: record the success. The original never wrote `true`
				// into matchedPorts, so the cache only remembered failures and
				// every open port was re-dialed (1s timeout each) for every
				// other description that also required it.
				matchedPorts[port] = true
			}
		}
		if match {
			matchedDrivers = append(matchedDrivers, id) // add
			s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
		}
	}
	return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network
// interfaces and replaces s.interfaces with the current set.
// Interfaces listed in ignoredIPv4Interfaces (loopback, tunnels) are
// skipped. Returns an error only if the OS interface list cannot be read.
func (s *Scanner) refreshInterfaces() error {
	s.ilock.Lock() // Lock the interfaces for writing
	defer s.ilock.Unlock()
	ifaces, err := net.Interfaces()
	if err != nil {
		return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
	}
	foundInterfaces := make(map[string]net.Interface)
	newInterfaces := make(map[string]struct{})
	for _, iface := range ifaces {
		name := iface.Name
		if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
			foundInterfaces[name] = iface
			if _, ok := s.interfaces[name]; !ok {
				newInterfaces[name] = struct{}{}
			}
			delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
		}
	}
	if len(s.interfaces) > 0 {
		// BUG FIX: the original filled names[i] without ever incrementing i,
		// so only the first disappeared interface was ever logged (the rest
		// of the slice stayed ""). append sidesteps the index bookkeeping.
		names := make([]string, 0, len(s.interfaces))
		for name := range s.interfaces {
			names = append(names, name)
		}
		s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
	}
	s.interfaces = foundInterfaces
	if len(newInterfaces) > 0 {
		names := make([]string, 0, len(newInterfaces))
		for name := range newInterfaces {
			names = append(names, name)
		}
		s.log.Debug("new IPv4 interfaces found", "interfaces", names)
	}
	return nil
}
// incrementIP increments an IPv4 address
func incrementIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
// An IContinuousScanner is a Scanner that scans continuously. Scan results are
// passed to the channel provided by FoundServices()
type IContinuousScanner interface {
suture.Service
IScanner
FoundServices() chan ServiceFoundNotification
}
// ContinuousScanner implements IContinuousScanner. It scans continuously,
// putting results into the channel provided by FoundServices().
type ContinuousScanner struct {
*Scanner
foundIPChan chan ServiceFoundNotification
period time.Duration
stop chan struct{}
}
// NewContinousScanner properly instantiates a ContinuousScanner. The new
// Scanner will wait between scans for a time defined by `period`.
func NewContinousScanner(period time.Duration) *ContinuousScanner {
return &ContinuousScanner{
Scanner: NewScanner(),
foundIPChan: make(chan ServiceFoundNotification),
period: period,
stop: make(chan struct{}),
}
}
// FoundServices returns a channel which will be populated with services found
// by the ContinuousScanner
func (s *ContinuousScanner) | () chan ServiceFoundNotification {
return s.foundIPChan
}
// Serve begins serving the ContinuousScanner.
func (s *ContinuousScanner) Serve() {
s.log.Debug("starting continuous ipv4 scanner", "period", s.period)
timer := time.NewTimer(time.Hour)
for {
// Perform a scan
s.log.Debug("doing ipv4 scan")
for ip, serviceIDs := range s.Scan() {
s.log.Debug("found ipv4 scan", "ip", net.ParseIP(ip), "descriptions", serviceIDs)
s.foundIPChan <- ServiceFoundNotification{
IP: net.ParseIP(ip),
MatchingDescriptionIDs: serviceIDs,
}
}
// Wait for s.period
s.log.Debug("waiting", "duration", s.period)
timer.Reset(s.period)
select {
case <-s.stop:
return
case <-timer.C:
}
}
}
// Stop stops the ContinousScanner
func (s *ContinuousScanner) Stop() {
s.stop <- struct{}{}
}
| FoundServices | identifier_name |
menus.py | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def open(self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu",
"The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self):
self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.undoAct)
self.editMenu.addAction(self.redoAct)
self.editMenu.addSeparator()
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.editMenu.addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.formatMenu = self.editMenu.addMenu("&Format")
self.formatMenu.addAction(self.boldAct)
self.formatMenu.addAction(self.italicAct)
self.formatMenu.addSeparator().setText("Alignment")
self.formatMenu.addAction(self.leftAlignAct)
self.formatMenu.addAction(self.rightAlignAct)
self.formatMenu.addAction(self.justifyAct)
self.formatMenu.addAction(self.centerAct)
self.formatMenu.addSeparator()
self.formatMenu.addAction(self.setLineSpacingAct)
self.formatMenu.addAction(self.setParagraphSpacingAct)
if __name__ == '__main__':
| import sys
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_()) | conditional_block | |
menus.py | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def open(self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu", | self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.undoAct)
self.editMenu.addAction(self.redoAct)
self.editMenu.addSeparator()
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.editMenu.addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.formatMenu = self.editMenu.addMenu("&Format")
self.formatMenu.addAction(self.boldAct)
self.formatMenu.addAction(self.italicAct)
self.formatMenu.addSeparator().setText("Alignment")
self.formatMenu.addAction(self.leftAlignAct)
self.formatMenu.addAction(self.rightAlignAct)
self.formatMenu.addAction(self.justifyAct)
self.formatMenu.addAction(self.centerAct)
self.formatMenu.addSeparator()
self.formatMenu.addAction(self.setLineSpacingAct)
self.formatMenu.addAction(self.setParagraphSpacingAct)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_()) | "The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self): | random_line_split |
menus.py | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def | (self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu",
"The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self):
self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.undoAct)
self.editMenu.addAction(self.redoAct)
self.editMenu.addSeparator()
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.editMenu.addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.formatMenu = self.editMenu.addMenu("&Format")
self.formatMenu.addAction(self.boldAct)
self.formatMenu.addAction(self.italicAct)
self.formatMenu.addSeparator().setText("Alignment")
self.formatMenu.addAction(self.leftAlignAct)
self.formatMenu.addAction(self.rightAlignAct)
self.formatMenu.addAction(self.justifyAct)
self.formatMenu.addAction(self.centerAct)
self.formatMenu.addSeparator()
self.formatMenu.addAction(self.setLineSpacingAct)
self.formatMenu.addAction(self.setParagraphSpacingAct)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| open | identifier_name |
menus.py | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
|
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def open(self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu",
"The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self):
self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.undoAct)
self.editMenu.addAction(self.redoAct)
self.editMenu.addSeparator()
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.editMenu.addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.formatMenu = self.editMenu.addMenu("&Format")
self.formatMenu.addAction(self.boldAct)
self.formatMenu.addAction(self.italicAct)
self.formatMenu.addSeparator().setText("Alignment")
self.formatMenu.addAction(self.leftAlignAct)
self.formatMenu.addAction(self.rightAlignAct)
self.formatMenu.addAction(self.justifyAct)
self.formatMenu.addAction(self.centerAct)
self.formatMenu.addSeparator()
self.formatMenu.addAction(self.setLineSpacingAct)
self.formatMenu.addAction(self.setParagraphSpacingAct)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320) | identifier_body |
wasm.rs | //! Wrappers over the Rust part of the IDE codebase.
use crate::prelude::*;
use crate::paths::generated::RepoRootDistWasm;
use crate::paths::generated::RepoRootTargetEnsoglPackLinkedDist;
use crate::project::Context;
use crate::project::IsArtifact;
use crate::project::IsTarget;
use crate::project::IsWatchable;
use crate::source::BuildTargetJob;
use crate::source::WatchTargetJob;
use crate::source::WithDestination;
use derivative::Derivative;
use ide_ci::cache;
use ide_ci::fs::compressed_size;
use ide_ci::fs::copy_file_if_different;
use ide_ci::goodies::shader_tools::ShaderTools;
use ide_ci::programs::cargo;
use ide_ci::programs::wasm_opt;
use ide_ci::programs::wasm_opt::WasmOpt;
use ide_ci::programs::wasm_pack;
use ide_ci::programs::Cargo;
use ide_ci::programs::WasmPack;
use semver::VersionReq;
use std::time::Duration;
use tempfile::tempdir;
use tokio::process::Child;
// ==============
// === Export ===
// ==============
pub mod env;
pub mod test;
pub const BINARYEN_VERSION_TO_INSTALL: u32 = 108;
pub const DEFAULT_INTEGRATION_TESTS_WASM_TIMEOUT: Duration = Duration::from_secs(300);
pub const INTEGRATION_TESTS_CRATE_NAME: &str = "enso-integration-test";
pub const OUTPUT_NAME: &str = "ide";
/// Name of the artifact that will be uploaded as part of CI run.
pub const WASM_ARTIFACT_NAME: &str = "gui_wasm";
pub const DEFAULT_TARGET_CRATE: &str = "app/gui";
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum ProfilingLevel {
#[default]
Objective,
Task,
Detail,
Debug,
}
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum LogLevel {
Error,
#[default]
Warn,
Info,
Debug,
Trace,
}
#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::AsRefStr)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
Dev,
Profile,
Release,
// Production,
}
impl From<Profile> for wasm_pack::Profile {
fn from(profile: Profile) -> Self {
match profile {
Profile::Dev => Self::Dev,
Profile::Profile => Self::Profile,
Profile::Release => Self::Release,
// Profile::Production => Self::Release,
}
}
}
impl Profile {
pub fn should_check_size(self) -> bool {
match self {
Profile::Dev => false,
Profile::Profile => false,
Profile::Release => true,
// Profile::Production => true,
}
}
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WAM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true);
info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true);
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> |
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run first build of wasm manually, rather through cargo-watch.
// After it is completed, the cargo-watch gets spawned and this method yields the watcher.
// This forces esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, much neater workaround should be possible, if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn check(&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm", !wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env::WASM_BINDGEN_TEST_TIMEOUT,
wasm_timeout.map(|d| d.as_secs()).as_ref(),
)?
.test()
.apply_opt(headless.then_some(&Headless))
.apply(&test::BROWSER_FOR_WASM_TESTS)
.arg("integration-test")
.arg("--profile=integration-test")
.args(additional_options)
.run_ok()
.await
// PM will be automatically killed by dropping the handle.
}
/// Process "raw" WASM (as compiled) by optionally invoking wasm-opt.
pub async fn finalize_wasm(
wasm_opt_options: &[String],
skip_wasm_opt: bool,
profile: Profile,
temp_dist: &RepoRootDistWasm,
) -> Result {
let should_call_wasm_opt = {
if profile == Profile::Dev {
debug!("Skipping wasm-opt invocation, as it is not part of profile {profile}.");
false
} else if skip_wasm_opt {
debug!("Skipping wasm-opt invocation, as it was explicitly requested.");
false
} else {
true
}
};
if should_call_wasm_opt {
let mut wasm_opt_command = WasmOpt.cmd()?;
let has_custom_opt_level = wasm_opt_options.iter().any(|opt| {
wasm_opt::OptimizationLevel::from_str(opt.trim_start_matches('-')).is_ok()
});
if !has_custom_opt_level {
wasm_opt_command.apply(&profile.optimization_level());
}
wasm_opt_command
.args(wasm_opt_options)
.arg(&temp_dist.pkg_wasm)
.apply(&wasm_opt::Output(&temp_dist.pkg_opt_wasm))
.run_ok()
.await?;
} else {
copy_file_if_different(&temp_dist.pkg_wasm, &temp_dist.pkg_opt_wasm)?;
}
Ok(())
}
}
| {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
} | identifier_body |
wasm.rs | //! Wrappers over the Rust part of the IDE codebase.
use crate::prelude::*;
use crate::paths::generated::RepoRootDistWasm;
use crate::paths::generated::RepoRootTargetEnsoglPackLinkedDist;
use crate::project::Context;
use crate::project::IsArtifact;
use crate::project::IsTarget;
use crate::project::IsWatchable;
use crate::source::BuildTargetJob;
use crate::source::WatchTargetJob;
use crate::source::WithDestination;
use derivative::Derivative;
use ide_ci::cache;
use ide_ci::fs::compressed_size;
use ide_ci::fs::copy_file_if_different;
use ide_ci::goodies::shader_tools::ShaderTools;
use ide_ci::programs::cargo;
use ide_ci::programs::wasm_opt;
use ide_ci::programs::wasm_opt::WasmOpt;
use ide_ci::programs::wasm_pack;
use ide_ci::programs::Cargo;
use ide_ci::programs::WasmPack;
use semver::VersionReq;
use std::time::Duration;
use tempfile::tempdir;
use tokio::process::Child;
// ==============
// === Export ===
// ==============
pub mod env;
pub mod test;
pub const BINARYEN_VERSION_TO_INSTALL: u32 = 108;
pub const DEFAULT_INTEGRATION_TESTS_WASM_TIMEOUT: Duration = Duration::from_secs(300);
pub const INTEGRATION_TESTS_CRATE_NAME: &str = "enso-integration-test";
pub const OUTPUT_NAME: &str = "ide";
/// Name of the artifact that will be uploaded as part of CI run.
pub const WASM_ARTIFACT_NAME: &str = "gui_wasm";
pub const DEFAULT_TARGET_CRATE: &str = "app/gui";
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum ProfilingLevel {
#[default]
Objective,
Task,
Detail,
Debug,
}
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum LogLevel {
Error,
#[default]
Warn,
Info,
Debug,
Trace,
}
#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::AsRefStr)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
Dev,
Profile,
Release,
// Production,
}
impl From<Profile> for wasm_pack::Profile {
fn from(profile: Profile) -> Self {
match profile {
Profile::Dev => Self::Dev,
Profile::Profile => Self::Profile,
Profile::Release => Self::Release,
// Profile::Production => Self::Release,
}
}
}
impl Profile {
pub fn should_check_size(self) -> bool {
match self {
Profile::Dev => false,
Profile::Profile => false,
Profile::Release => true,
// Profile::Production => true,
}
}
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WAM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true); | if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run first build of wasm manually, rather through cargo-watch.
// After it is completed, the cargo-watch gets spawned and this method yields the watcher.
// This forces esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, much neater workaround should be possible, if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn check(&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm", !wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env::WASM_BINDGEN_TEST_TIMEOUT,
wasm_timeout.map(|d| d.as_secs()).as_ref(),
)?
.test()
.apply_opt(headless.then_some(&Headless))
.apply(&test::BROWSER_FOR_WASM_TESTS)
.arg("integration-test")
.arg("--profile=integration-test")
.args(additional_options)
.run_ok()
.await
// PM will be automatically killed by dropping the handle.
}
/// Process "raw" WASM (as compiled) by optionally invoking wasm-opt.
pub async fn finalize_wasm(
wasm_opt_options: &[String],
skip_wasm_opt: bool,
profile: Profile,
temp_dist: &RepoRootDistWasm,
) -> Result {
let should_call_wasm_opt = {
if profile == Profile::Dev {
debug!("Skipping wasm-opt invocation, as it is not part of profile {profile}.");
false
} else if skip_wasm_opt {
debug!("Skipping wasm-opt invocation, as it was explicitly requested.");
false
} else {
true
}
};
if should_call_wasm_opt {
let mut wasm_opt_command = WasmOpt.cmd()?;
let has_custom_opt_level = wasm_opt_options.iter().any(|opt| {
wasm_opt::OptimizationLevel::from_str(opt.trim_start_matches('-')).is_ok()
});
if !has_custom_opt_level {
wasm_opt_command.apply(&profile.optimization_level());
}
wasm_opt_command
.args(wasm_opt_options)
.arg(&temp_dist.pkg_wasm)
.apply(&wasm_opt::Output(&temp_dist.pkg_opt_wasm))
.run_ok()
.await?;
} else {
copy_file_if_different(&temp_dist.pkg_wasm, &temp_dist.pkg_opt_wasm)?;
}
Ok(())
}
} | info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true); | random_line_split |
wasm.rs | //! Wrappers over the Rust part of the IDE codebase.
use crate::prelude::*;
use crate::paths::generated::RepoRootDistWasm;
use crate::paths::generated::RepoRootTargetEnsoglPackLinkedDist;
use crate::project::Context;
use crate::project::IsArtifact;
use crate::project::IsTarget;
use crate::project::IsWatchable;
use crate::source::BuildTargetJob;
use crate::source::WatchTargetJob;
use crate::source::WithDestination;
use derivative::Derivative;
use ide_ci::cache;
use ide_ci::fs::compressed_size;
use ide_ci::fs::copy_file_if_different;
use ide_ci::goodies::shader_tools::ShaderTools;
use ide_ci::programs::cargo;
use ide_ci::programs::wasm_opt;
use ide_ci::programs::wasm_opt::WasmOpt;
use ide_ci::programs::wasm_pack;
use ide_ci::programs::Cargo;
use ide_ci::programs::WasmPack;
use semver::VersionReq;
use std::time::Duration;
use tempfile::tempdir;
use tokio::process::Child;
// ==============
// === Export ===
// ==============
pub mod env;
pub mod test;
pub const BINARYEN_VERSION_TO_INSTALL: u32 = 108;
pub const DEFAULT_INTEGRATION_TESTS_WASM_TIMEOUT: Duration = Duration::from_secs(300);
pub const INTEGRATION_TESTS_CRATE_NAME: &str = "enso-integration-test";
pub const OUTPUT_NAME: &str = "ide";
/// Name of the artifact that will be uploaded as part of CI run.
pub const WASM_ARTIFACT_NAME: &str = "gui_wasm";
pub const DEFAULT_TARGET_CRATE: &str = "app/gui";
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum ProfilingLevel {
#[default]
Objective,
Task,
Detail,
Debug,
}
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum LogLevel {
Error,
#[default]
Warn,
Info,
Debug,
Trace,
}
#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::AsRefStr)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
Dev,
Profile,
Release,
// Production,
}
impl From<Profile> for wasm_pack::Profile {
fn from(profile: Profile) -> Self {
match profile {
Profile::Dev => Self::Dev,
Profile::Profile => Self::Profile,
Profile::Release => Self::Release,
// Profile::Production => Self::Release,
}
}
}
impl Profile {
pub fn should_check_size(self) -> bool {
match self {
Profile::Dev => false,
Profile::Profile => false,
Profile::Release => true,
// Profile::Production => true,
}
}
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WAM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true);
info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true);
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run first build of wasm manually, rather through cargo-watch.
// After it is completed, the cargo-watch gets spawned and this method yields the watcher.
// This forces esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, much neater workaround should be possible, if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn | (&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm", !wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env::WASM_BINDGEN_TEST_TIMEOUT,
wasm_timeout.map(|d| d.as_secs()).as_ref(),
)?
.test()
.apply_opt(headless.then_some(&Headless))
.apply(&test::BROWSER_FOR_WASM_TESTS)
.arg("integration-test")
.arg("--profile=integration-test")
.args(additional_options)
.run_ok()
.await
// PM will be automatically killed by dropping the handle.
}
/// Process "raw" WASM (as compiled) by optionally invoking wasm-opt.
pub async fn finalize_wasm(
wasm_opt_options: &[String],
skip_wasm_opt: bool,
profile: Profile,
temp_dist: &RepoRootDistWasm,
) -> Result {
let should_call_wasm_opt = {
if profile == Profile::Dev {
debug!("Skipping wasm-opt invocation, as it is not part of profile {profile}.");
false
} else if skip_wasm_opt {
debug!("Skipping wasm-opt invocation, as it was explicitly requested.");
false
} else {
true
}
};
if should_call_wasm_opt {
let mut wasm_opt_command = WasmOpt.cmd()?;
let has_custom_opt_level = wasm_opt_options.iter().any(|opt| {
wasm_opt::OptimizationLevel::from_str(opt.trim_start_matches('-')).is_ok()
});
if !has_custom_opt_level {
wasm_opt_command.apply(&profile.optimization_level());
}
wasm_opt_command
.args(wasm_opt_options)
.arg(&temp_dist.pkg_wasm)
.apply(&wasm_opt::Output(&temp_dist.pkg_opt_wasm))
.run_ok()
.await?;
} else {
copy_file_if_different(&temp_dist.pkg_wasm, &temp_dist.pkg_opt_wasm)?;
}
Ok(())
}
}
| check | identifier_name |
wasm.rs | //! Wrappers over the Rust part of the IDE codebase.
use crate::prelude::*;
use crate::paths::generated::RepoRootDistWasm;
use crate::paths::generated::RepoRootTargetEnsoglPackLinkedDist;
use crate::project::Context;
use crate::project::IsArtifact;
use crate::project::IsTarget;
use crate::project::IsWatchable;
use crate::source::BuildTargetJob;
use crate::source::WatchTargetJob;
use crate::source::WithDestination;
use derivative::Derivative;
use ide_ci::cache;
use ide_ci::fs::compressed_size;
use ide_ci::fs::copy_file_if_different;
use ide_ci::goodies::shader_tools::ShaderTools;
use ide_ci::programs::cargo;
use ide_ci::programs::wasm_opt;
use ide_ci::programs::wasm_opt::WasmOpt;
use ide_ci::programs::wasm_pack;
use ide_ci::programs::Cargo;
use ide_ci::programs::WasmPack;
use semver::VersionReq;
use std::time::Duration;
use tempfile::tempdir;
use tokio::process::Child;
// ==============
// === Export ===
// ==============
pub mod env;
pub mod test;
pub const BINARYEN_VERSION_TO_INSTALL: u32 = 108;
pub const DEFAULT_INTEGRATION_TESTS_WASM_TIMEOUT: Duration = Duration::from_secs(300);
pub const INTEGRATION_TESTS_CRATE_NAME: &str = "enso-integration-test";
pub const OUTPUT_NAME: &str = "ide";
/// Name of the artifact that will be uploaded as part of CI run.
pub const WASM_ARTIFACT_NAME: &str = "gui_wasm";
pub const DEFAULT_TARGET_CRATE: &str = "app/gui";
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum ProfilingLevel {
#[default]
Objective,
Task,
Detail,
Debug,
}
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum LogLevel {
Error,
#[default]
Warn,
Info,
Debug,
Trace,
}
#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::AsRefStr)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
Dev,
Profile,
Release,
// Production,
}
impl From<Profile> for wasm_pack::Profile {
fn from(profile: Profile) -> Self {
match profile {
Profile::Dev => Self::Dev,
Profile::Profile => Self::Profile,
Profile::Release => Self::Release,
// Profile::Production => Self::Release,
}
}
}
impl Profile {
pub fn should_check_size(self) -> bool {
match self {
Profile::Dev => false,
Profile::Profile => false,
Profile::Release => true,
// Profile::Production => true,
}
}
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WAM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true);
info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true);
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run first build of wasm manually, rather through cargo-watch.
// After it is completed, the cargo-watch gets spawned and this method yields the watcher.
// This forces esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, much neater workaround should be possible, if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn check(&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm", !wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env::WASM_BINDGEN_TEST_TIMEOUT,
wasm_timeout.map(|d| d.as_secs()).as_ref(),
)?
.test()
.apply_opt(headless.then_some(&Headless))
.apply(&test::BROWSER_FOR_WASM_TESTS)
.arg("integration-test")
.arg("--profile=integration-test")
.args(additional_options)
.run_ok()
.await
// PM will be automatically killed by dropping the handle.
}
/// Process "raw" WASM (as compiled) by optionally invoking wasm-opt.
pub async fn finalize_wasm(
wasm_opt_options: &[String],
skip_wasm_opt: bool,
profile: Profile,
temp_dist: &RepoRootDistWasm,
) -> Result {
let should_call_wasm_opt = {
if profile == Profile::Dev {
debug!("Skipping wasm-opt invocation, as it is not part of profile {profile}.");
false
} else if skip_wasm_opt {
debug!("Skipping wasm-opt invocation, as it was explicitly requested.");
false
} else {
true
}
};
if should_call_wasm_opt | else {
copy_file_if_different(&temp_dist.pkg_wasm, &temp_dist.pkg_opt_wasm)?;
}
Ok(())
}
}
| {
let mut wasm_opt_command = WasmOpt.cmd()?;
let has_custom_opt_level = wasm_opt_options.iter().any(|opt| {
wasm_opt::OptimizationLevel::from_str(opt.trim_start_matches('-')).is_ok()
});
if !has_custom_opt_level {
wasm_opt_command.apply(&profile.optimization_level());
}
wasm_opt_command
.args(wasm_opt_options)
.arg(&temp_dist.pkg_wasm)
.apply(&wasm_opt::Output(&temp_dist.pkg_opt_wasm))
.run_ok()
.await?;
} | conditional_block |
server.rs | use std::thread;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::collections::VecDeque;
use std::io::BufReader;
// MIO
use mio::tcp::{listen, TcpListener, TcpStream};
use mio::util::Slab;
use mio::Socket;
use mio::buf::{RingBuf};
use mio::{Interest, PollOpt, NonBlock, Token, EventLoop, Handler, ReadHint};
use mio::buf::Buf;
use mio::{TryRead, TryWrite};
use rand::{self, Rng};
// Data structures.
use store::Store;
use replica::{Replica, Emit, Broadcast};
use state_machine::StateMachine;
// Cap'n Proto
use capnp::serialize_packed;
use capnp::{
MessageBuilder,
MessageReader,
ReaderOptions,
MallocMessageBuilder,
OwnedSpaceMessageReader,
};
use messages_capnp::{
rpc_request,
rpc_response,
client_request,
client_response,
request_vote_response,
append_entries_response,
append_entries_request,
};
use super::{Error, Result};
// MIO Tokens
const ELECTION_TIMEOUT: Token = Token(0);
const HEARTBEAT_TIMEOUT: Token = Token(1);
const LISTENER: Token = Token(2);
const ELECTION_MIN: u64 = 150;
const ELECTION_MAX: u64 = 300;
const HEARTBEAT_DURATION: u64 = 50;
const RINGBUF_SIZE: usize = 4096;
/// The Raft Distributed Consensus Algorithm requires two RPC calls to be available:
///
/// * `append_entries` which is used as both a heartbeat (with no payload) and the primary
/// interface for requests.
/// * `request_vote` which is used by candidates during campaigns to obtain a vote.
///
/// A `Server` acts as a replicated state machine. The server's role in the cluster depends on it's
/// own status. It will maintain both volatile state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::new()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The address of all peers in the Raft cluster.
/// * `store` - The persitent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Setup the socket, make it not block.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and doing
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh it's authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn | <S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// This is a buffer flush. WOULDBLOCK
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one.
fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// This is called when there is a full reader available in the buffer.
/// It handles what to do with the data.
fn handle_reader<S, M>(&mut self, reader: OwnedSpaceMessageReader,
event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
where S: Store, M: StateMachine {
let mut builder_message = MallocMessageBuilder::new_default();
let from = self.stream.peer_addr().unwrap();
if let Ok(request) = reader.get_root::<rpc_request::Reader>() {
match request.which().unwrap() {
// TODO: Move these into replica?
rpc_request::Which::AppendEntries(Ok(call)) => {
let builder = builder_message.init_root::<append_entries_response::Builder>();
match replica.append_entries_request(from, call, builder) {
Some(Emit) => {
// Special Cirsumstance Detection
unimplemented!();
},
None => (),
}
},
rpc_request::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<request_vote_response::Builder>();
replica.request_vote_request(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
} else if let Ok(response) = reader.get_root::<rpc_response::Reader>() {
// We won't be responding. This is already a response.
match response.which().unwrap() {
rpc_response::Which::AppendEntries(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.append_entries_response(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
rpc_response::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.request_vote_response(from, call, builder)
};
match respond {
Some(Broadcast) => {
// Won an election!
self.broadcast(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
}
} else if let Ok(client_req) = reader.get_root::<client_request::Reader>() {
let mut should_die = false;
// We will be responding.
match client_req.which().unwrap() {
client_request::Which::Append(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_append(from, call, builder)
};
match respond {
Some(emit) => {
self.emit(builder_message);
},
None => (),
}
},
client_request::Which::Die(Ok(call)) => {
should_die = true;
let mut builder = builder_message.init_root::<client_response::Builder>();
builder.set_success(());
self.interest.insert(Interest::writable());
debug!("Got a Die request from Client({}). Reason: {}", from, call);
},
client_request::Which::LeaderRefresh(()) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_leader_refresh(from, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
// Do this here so that we can send the response.
if should_die {
panic!("Got a Die request.");
}
} else {
// It's something we don't understand.
unimplemented!();
}
}
fn broadcast(&mut self, builder: MallocMessageBuilder) {
unimplemented!();
}
/// Push the new message into `self.next_write`. This does not actually send the message, it
/// just queues it up.
pub fn emit(&mut self, mut builder: MallocMessageBuilder) {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut builder
).unwrap();
self.add_write(buf);
}
/// This queues a byte buffer into the write queue. This is used primarily when message has
/// already been packed.
pub fn add_write(&mut self, buf: RingBuf) {
self.next_write.push_back(buf);
}
}
| writable | identifier_name |
server.rs | use std::thread;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::collections::VecDeque;
use std::io::BufReader;
// MIO
use mio::tcp::{listen, TcpListener, TcpStream};
use mio::util::Slab;
use mio::Socket;
use mio::buf::{RingBuf};
use mio::{Interest, PollOpt, NonBlock, Token, EventLoop, Handler, ReadHint};
use mio::buf::Buf;
use mio::{TryRead, TryWrite};
use rand::{self, Rng};
// Data structures.
use store::Store;
use replica::{Replica, Emit, Broadcast};
use state_machine::StateMachine;
// Cap'n Proto
use capnp::serialize_packed;
use capnp::{
MessageBuilder,
MessageReader,
ReaderOptions,
MallocMessageBuilder,
OwnedSpaceMessageReader,
};
use messages_capnp::{
rpc_request,
rpc_response,
client_request,
client_response,
request_vote_response,
append_entries_response,
append_entries_request,
};
use super::{Error, Result};
// MIO Tokens
const ELECTION_TIMEOUT: Token = Token(0);
const HEARTBEAT_TIMEOUT: Token = Token(1);
const LISTENER: Token = Token(2);
const ELECTION_MIN: u64 = 150;
const ELECTION_MAX: u64 = 300;
const HEARTBEAT_DURATION: u64 = 50;
const RINGBUF_SIZE: usize = 4096;
/// The Raft Distributed Consensus Algorithm requires two RPC calls to be available:
///
/// * `append_entries` which is used as both a heartbeat (with no payload) and the primary
/// interface for requests.
/// * `request_vote` which is used by candidates during campaigns to obtain a vote.
///
/// A `Server` acts as a replicated state machine. The server's role in the cluster depends on it's
/// own status. It will maintain both volatile state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::new()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The address of all peers in the Raft cluster.
/// * `store` - The persitent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Setup the socket, make it not block.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and doing
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh it's authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn writable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// This is a buffer flush. WOULDBLOCK
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one. | -> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// This is called when there is a full reader available in the buffer.
/// It handles what to do with the data.
fn handle_reader<S, M>(&mut self, reader: OwnedSpaceMessageReader,
event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
where S: Store, M: StateMachine {
let mut builder_message = MallocMessageBuilder::new_default();
let from = self.stream.peer_addr().unwrap();
if let Ok(request) = reader.get_root::<rpc_request::Reader>() {
match request.which().unwrap() {
// TODO: Move these into replica?
rpc_request::Which::AppendEntries(Ok(call)) => {
let builder = builder_message.init_root::<append_entries_response::Builder>();
match replica.append_entries_request(from, call, builder) {
Some(Emit) => {
// Special Cirsumstance Detection
unimplemented!();
},
None => (),
}
},
rpc_request::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<request_vote_response::Builder>();
replica.request_vote_request(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
} else if let Ok(response) = reader.get_root::<rpc_response::Reader>() {
// We won't be responding. This is already a response.
match response.which().unwrap() {
rpc_response::Which::AppendEntries(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.append_entries_response(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
rpc_response::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.request_vote_response(from, call, builder)
};
match respond {
Some(Broadcast) => {
// Won an election!
self.broadcast(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
}
} else if let Ok(client_req) = reader.get_root::<client_request::Reader>() {
let mut should_die = false;
// We will be responding.
match client_req.which().unwrap() {
client_request::Which::Append(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_append(from, call, builder)
};
match respond {
Some(emit) => {
self.emit(builder_message);
},
None => (),
}
},
client_request::Which::Die(Ok(call)) => {
should_die = true;
let mut builder = builder_message.init_root::<client_response::Builder>();
builder.set_success(());
self.interest.insert(Interest::writable());
debug!("Got a Die request from Client({}). Reason: {}", from, call);
},
client_request::Which::LeaderRefresh(()) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_leader_refresh(from, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
// Do this here so that we can send the response.
if should_die {
panic!("Got a Die request.");
}
} else {
// It's something we don't understand.
unimplemented!();
}
}
fn broadcast(&mut self, builder: MallocMessageBuilder) {
unimplemented!();
}
/// Push the new message into `self.next_write`. This does not actually send the message, it
/// just queues it up.
pub fn emit(&mut self, mut builder: MallocMessageBuilder) {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut builder
).unwrap();
self.add_write(buf);
}
/// This queues a byte buffer into the write queue. This is used primarily when message has
/// already been packed.
pub fn add_write(&mut self, buf: RingBuf) {
self.next_write.push_back(buf);
}
} | fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>) | random_line_split |
server.rs | use std::thread;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::collections::VecDeque;
use std::io::BufReader;
// MIO
use mio::tcp::{listen, TcpListener, TcpStream};
use mio::util::Slab;
use mio::Socket;
use mio::buf::{RingBuf};
use mio::{Interest, PollOpt, NonBlock, Token, EventLoop, Handler, ReadHint};
use mio::buf::Buf;
use mio::{TryRead, TryWrite};
use rand::{self, Rng};
// Data structures.
use store::Store;
use replica::{Replica, Emit, Broadcast};
use state_machine::StateMachine;
// Cap'n Proto
use capnp::serialize_packed;
use capnp::{
MessageBuilder,
MessageReader,
ReaderOptions,
MallocMessageBuilder,
OwnedSpaceMessageReader,
};
use messages_capnp::{
rpc_request,
rpc_response,
client_request,
client_response,
request_vote_response,
append_entries_response,
append_entries_request,
};
use super::{Error, Result};
// MIO Tokens
const ELECTION_TIMEOUT: Token = Token(0);
const HEARTBEAT_TIMEOUT: Token = Token(1);
const LISTENER: Token = Token(2);
const ELECTION_MIN: u64 = 150;
const ELECTION_MAX: u64 = 300;
const HEARTBEAT_DURATION: u64 = 50;
const RINGBUF_SIZE: usize = 4096;
/// The Raft Distributed Consensus Algorithm requires two RPC calls to be available:
///
/// * `append_entries` which is used as both a heartbeat (with no payload) and the primary
/// interface for requests.
/// * `request_vote` which is used by candidates during campaigns to obtain a vote.
///
/// A `Server` acts as a replicated state machine. The server's role in the cluster depends on it's
/// own status. It will maintain both volatile state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::new()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The address of all peers in the Raft cluster.
/// * `store` - The persitent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Setup the socket, make it not block.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and doing
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh it's authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn writable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// This is a buffer flush. WOULDBLOCK
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one.
fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// This is called when there is a full reader available in the buffer.
/// It handles what to do with the data.
fn handle_reader<S, M>(&mut self, reader: OwnedSpaceMessageReader,
event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
where S: Store, M: StateMachine {
let mut builder_message = MallocMessageBuilder::new_default();
let from = self.stream.peer_addr().unwrap();
if let Ok(request) = reader.get_root::<rpc_request::Reader>() {
match request.which().unwrap() {
// TODO: Move these into replica?
rpc_request::Which::AppendEntries(Ok(call)) => {
let builder = builder_message.init_root::<append_entries_response::Builder>();
match replica.append_entries_request(from, call, builder) {
Some(Emit) => {
// Special Cirsumstance Detection
unimplemented!();
},
None => (),
}
},
rpc_request::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<request_vote_response::Builder>();
replica.request_vote_request(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
} else if let Ok(response) = reader.get_root::<rpc_response::Reader>() {
// We won't be responding. This is already a response.
match response.which().unwrap() {
rpc_response::Which::AppendEntries(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.append_entries_response(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
rpc_response::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.request_vote_response(from, call, builder)
};
match respond {
Some(Broadcast) => | ,
None => (),
}
},
_ => unimplemented!(),
}
} else if let Ok(client_req) = reader.get_root::<client_request::Reader>() {
let mut should_die = false;
// We will be responding.
match client_req.which().unwrap() {
client_request::Which::Append(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_append(from, call, builder)
};
match respond {
Some(emit) => {
self.emit(builder_message);
},
None => (),
}
},
client_request::Which::Die(Ok(call)) => {
should_die = true;
let mut builder = builder_message.init_root::<client_response::Builder>();
builder.set_success(());
self.interest.insert(Interest::writable());
debug!("Got a Die request from Client({}). Reason: {}", from, call);
},
client_request::Which::LeaderRefresh(()) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_leader_refresh(from, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
// Do this here so that we can send the response.
if should_die {
panic!("Got a Die request.");
}
} else {
// It's something we don't understand.
unimplemented!();
}
}
fn broadcast(&mut self, builder: MallocMessageBuilder) {
unimplemented!();
}
/// Push the new message into `self.next_write`. This does not actually send the message, it
/// just queues it up.
pub fn emit(&mut self, mut builder: MallocMessageBuilder) {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut builder
).unwrap();
self.add_write(buf);
}
/// This queues a byte buffer into the write queue. This is used primarily when message has
/// already been packed.
pub fn add_write(&mut self, buf: RingBuf) {
self.next_write.push_back(buf);
}
}
| {
// Won an election!
self.broadcast(builder_message);
} | conditional_block |
group.rs | //! Contains methods to generate many symmetry groups.
// Circumvents rust-analyzer bug.
#[allow(unused_imports)]
use crate::{cox, EPS};
use super::{convex, cox::CoxMatrix, geometry::Point, Concrete};
use approx::{abs_diff_ne, relative_eq};
use nalgebra::{
storage::Storage, DMatrix as Matrix, DVector as Vector, Dim, Dynamic, Quaternion, VecStorage,
U1,
};
use std::{
collections::{BTreeMap, BTreeSet, VecDeque},
f64::consts::PI,
};
/// Converts a 3D rotation matrix into a quaternion. Uses the code from
/// [Day (2015)](https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf).
fn mat_to_quat(mat: Matrix<f64>) -> Quaternion<f64> {
debug_assert!(
relative_eq!(mat.determinant(), 1.0, epsilon = EPS),
"Only matrices with determinant 1 can be turned into quaternions."
);
let t;
let q;
if mat[(2, 2)] < 0.0 {
if mat[(0, 0)] > mat[(1, 1)] {
t = 1.0 + mat[(0, 0)] - mat[(1, 1)] - mat[(2, 2)];
q = Quaternion::new(
t,
mat[(0, 1)] + mat[(1, 0)],
mat[(2, 0)] + mat[(0, 2)],
mat[(1, 2)] - mat[(2, 1)],
);
} else {
t = 1.0 - mat[(0, 0)] + mat[(1, 1)] - mat[(2, 2)];
q = Quaternion::new(
mat[(0, 1)] + mat[(1, 0)],
t,
mat[(1, 2)] + mat[(2, 1)],
mat[(2, 0)] - mat[(0, 2)],
);
}
} else if mat[(0, 0)] < -mat[(1, 1)] {
t = 1.0 - mat[(0, 0)] - mat[(1, 1)] + mat[(2, 2)];
q = Quaternion::new(
mat[(2, 0)] + mat[(0, 2)],
mat[(1, 2)] + mat[(2, 1)],
t,
mat[(0, 1)] - mat[(1, 0)],
);
} else {
t = 1.0 + mat[(0, 0)] + mat[(1, 1)] + mat[(2, 2)];
q = Quaternion::new(
mat[(1, 2)] - mat[(2, 1)],
mat[(2, 0)] - mat[(0, 2)],
mat[(0, 1)] - mat[(1, 0)],
t,
);
}
q * 0.5 / t.sqrt()
}
/// Converts a quaternion into a matrix, depending on whether it's a left or
/// right quaternion multiplication.
fn quat_to_mat(q: Quaternion<f64>, left: bool) -> Matrix<f64> {
let size = Dynamic::new(4);
let left = if left { 1.0 } else { -1.0 };
Matrix::from_data(VecStorage::new(
size,
size,
vec![
q.w,
q.i,
q.j,
q.k,
-q.i,
q.w,
left * q.k,
-left * q.j,
-q.j,
-left * q.k,
q.w,
left * q.i,
-q.k,
left * q.j,
-left * q.i,
q.w,
],
))
}
/// Computes the [direct sum](https://en.wikipedia.org/wiki/Block_matrix#Direct_sum)
/// of two matrices.
fn direct_sum(mat1: Matrix<f64>, mat2: Matrix<f64>) -> Matrix<f64> {
let dim1 = mat1.nrows();
let dim = dim1 + mat2.nrows();
Matrix::from_fn(dim, dim, |i, j| {
if i < dim1 {
if j < dim1 {
mat1[(i, j)]
} else {
0.0
}
} else if j >= dim1 {
mat2[(i - dim1, j - dim1)]
} else {
0.0
}
})
}
/// An iterator such that `dyn` objects using it can be cloned. Used to get
/// around orphan rules.
trait GroupIter: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone {}
impl<T: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone> GroupIter for T {}
dyn_clone::clone_trait_object!(GroupIter);
/// A [group](https://en.wikipedia.org/wiki/Group_(mathematics)) of matrices,
/// acting on a space of a certain dimension.
#[derive(Clone)]
pub struct Group {
/// The dimension of the matrices of the group. Stored separately so that
/// the iterator doesn't have to be peekable.
dim: usize,
/// The underlying iterator, which actually outputs the matrices.
iter: Box<dyn GroupIter>,
}
impl Iterator for Group {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
impl Group {
/// Gets all of the elements of the group. Consumes the iterator.
pub fn elements(self) -> Vec<Matrix<f64>> {
self.collect()
}
/// Gets the number of elements of the group. Consumes the iterators.
pub fn order(self) -> usize {
self.count()
}
pub fn from_gens(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
Self {
dim,
iter: Box::new(GenIter::new(dim, gens)),
}
}
/// Buils the rotation subgroup of a group.
pub fn rotations(self) -> Self {
// The determinant might not be exactly 1, so we're extra lenient and
// just test for positive determinants.
Self {
dim: self.dim,
iter: Box::new(self.filter(|el| el.determinant() > 0.0)),
}
}
/// Builds an iterator over the set of either left or a right quaternions
/// from a 3D group. **These won't actually generate a group,** as they
/// don't contain central inversion.
fn quaternions(self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self |
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<R: Dim, C: Dim> PartialEq for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn eq(&self, other: &Self) -> bool {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
}
impl<R: Dim, C: Dim> Eq for OrdMatrixMN<R, C> where VecStorage<f64, R, C>: Storage<f64, R, C> {}
impl<R: Dim, C: Dim> PartialOrd for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return x.partial_cmp(y);
}
}
Some(std::cmp::Ordering::Equal)
}
}
impl<R: Dim, C: Dim> Ord for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
impl<R: Dim, C: Dim> OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
pub fn new(mat: MatrixMN<R, C>) -> Self {
Self(mat)
}
}
type OrdMatrix = OrdMatrixMN<Dynamic, Dynamic>;
type OrdPoint = OrdMatrixMN<Dynamic, U1>;
/// An iterator for a `Group` [generated](https://en.wikipedia.org/wiki/Generator_(mathematics))
/// by a set of floating point matrices. Its elements are built in a BFS order.
/// It contains a lookup table, used to figure out whether an element has
/// already been found or not, as well as a queue to store the next elements.
#[derive(Clone)]
pub struct GenIter {
/// The number of dimensions the group acts on.
pub dim: usize,
/// The generators for the group.
pub gens: Vec<Matrix<f64>>,
/// Stores the elements that have been generated and that can still be
/// generated again. Is integral for the algorithm to work, as without it,
/// duplicate group elements will just keep generating forever.
elements: BTreeMap<OrdMatrix, usize>,
/// Stores the elements that haven't yet been processed.
queue: VecDeque<OrdMatrix>,
/// Stores the index in (`generators`)[GenGroup.generators] of the generator
/// that's being checked. All previous once will have already been
/// multiplied to the right of the current element. Quirk of the current
/// data structure, subject to change.
gen_idx: usize,
}
impl Iterator for GenIter {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.try_next() {
GroupNext::None => return None,
GroupNext::Repeat => {}
GroupNext::New(el) => return Some(el),
};
}
}
}
/// Determines whether two matrices are "approximately equal" elementwise.
fn matrix_approx(mat1: &Matrix<f64>, mat2: &Matrix<f64>) -> bool {
const EPS: f64 = 1e-4;
let mat1 = mat1.iter();
let mut mat2 = mat2.iter();
for x in mat1 {
let y = mat2.next().expect("Matrices don't have the same size!");
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
/// Builds a reflection matrix from a given vector.
pub fn refl_mat(n: Vector<f64>) -> Matrix<f64> {
let dim = n.nrows();
let nn = n.norm_squared();
// Reflects every basis vector, builds a matrix from all of their images.
Matrix::from_columns(
&Matrix::identity(dim, dim)
.column_iter()
.map(|v| v - (2.0 * v.dot(&n) / nn) * &n)
.collect::<Vec<_>>(),
)
}
impl GenIter {
/// Builds a new group from a set of generators.
fn new(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
// Initializes the queue with only the identity matrix.
let mut queue = VecDeque::new();
queue.push_back(OrdMatrix::new(Matrix::identity(dim, dim)));
// We say that the identity has been found zero times. This is a special
// case that ensures that neither the identity is queued nor found
// twice.
let mut elements = BTreeMap::new();
elements.insert(OrdMatrix::new(Matrix::identity(dim, dim)), 0);
Self {
dim,
gens,
elements,
queue,
gen_idx: 0,
}
}
/// Inserts a new element into the group. Returns whether the element is new.
fn insert(&mut self, el: Matrix<f64>) -> bool {
let el = OrdMatrix::new(el);
// If the element has been found before.
if let Some(value) = self.elements.insert(el.clone(), 1) {
// Bumps the value by 1, or removes the element if this is the last
// time we'll find the element.
if value != self.gens.len() - 1 {
self.elements.insert(el, value + 1);
} else {
self.elements.remove(&el);
}
// The element is a repeat, except in the special case of the
// identity.
value == 0
}
// If the element is new, we add it to the queue as well.
else {
self.queue.push_back(el);
true
}
}
/// Gets the next element and the next generator to attempt to multiply.
/// Advances the iterator.
fn next_el_gen(&mut self) -> Option<[Matrix<f64>; 2]> {
let el = self.queue.front()?.0.clone();
let gen = self.gens[self.gen_idx].clone();
// Advances the indices.
self.gen_idx += 1;
if self.gen_idx == self.gens.len() {
self.gen_idx = 0;
self.queue.pop_front();
}
Some([el, gen])
}
/// Multiplies the current element times the current generator, determines
/// if it is a new element. Advances the iterator.
fn try_next(&mut self) -> GroupNext {
// If there's a next element and generator.
if let Some([el, gen]) = self.next_el_gen() {
let new_el = el * gen;
// If the group element is new.
if self.insert(new_el.clone()) {
GroupNext::New(new_el)
}
// If we found a repeat.
else {
GroupNext::Repeat
}
}
// If we already went through the entire group.
else {
GroupNext::None
}
}
pub fn from_cox_mat(cox: CoxMatrix) -> Option<Self> {
const EPS: f64 = 1e-6;
let dim = cox.nrows();
let mut generators = Vec::with_capacity(dim);
// Builds each generator from the top down as a triangular matrix, so
// that the dot products match the values in the Coxeter matrix.
for i in 0..dim {
let mut gen_i = Vector::from_element(dim, 0.0);
for (j, gen_j) in generators.iter().enumerate() {
let dot = gen_i.dot(gen_j);
gen_i[j] = ((PI / cox[(i, j)] as f64).cos() - dot) / gen_j[j];
}
// The vector doesn't fit in spherical space.
let norm_sq = gen_i.norm_squared();
if norm_sq >= 1.0 - EPS {
return None;
} else {
gen_i[i] = (1.0 - norm_sq).sqrt();
}
generators.push(gen_i);
}
Some(Self::new(
dim,
generators.into_iter().map(refl_mat).collect(),
))
}
}
#[cfg(test)]
mod tests {
use gcd::Gcd;
use super::*;
/// Tests a given symmetry group.
fn test(group: Group, order: usize, rot_order: usize, name: &str) {
// Makes testing multiple derived groups faster.
let group = group.cache().debug();
// Tests the order of the group.
assert_eq!(
group.clone().order(),
order,
"{} does not have the expected order.",
name
);
// Tests the order of the rotational subgroup.
assert_eq!(
group.rotations().order(),
rot_order,
"The rotational group of {} does not have the expected order.",
name
);
}
/// Tests the trivial group in various dimensions.
#[test]
fn i() {
for n in 1..=10 {
test(Group::trivial(n), 1, 1, &format!("I^{}", n))
}
}
/// Tests the group consisting of the identity and a central inversion in
/// various dimensions.
#[test]
fn pm_i() {
for n in 1..=10 {
test(
Group::central_inv(n),
2,
(n + 1) % 2 + 1,
&format!("±I{}", n),
)
}
}
/// Tests the I2(*n*) symmetries, which correspond to the symmetries of a
/// regular *n*-gon.
#[test]
fn i2() {
for n in 2..=10 {
for d in 1..n {
if n.gcd(d) != 1 {
continue;
}
test(cox!(n as f64 / d as f64), 2 * n, n, &format!("I2({})", n));
}
}
}
/// Tests the A3⁺ @ (I2(*n*) × I) symmetries, the tetrahedron swirl
/// symmetries.
#[test]
fn a3_p_swirl_i2xi_p() {
for n in 2..10 {
let order = 24 * n;
test(
Group::swirl(
cox!(3.0, 3.0),
Group::direct_product(cox!(n), Group::trivial(1)),
),
order,
order,
&format!("A3⁺ @ (I2({}) × I)", n),
)
}
}
/// Tests the A*n* symmetries, which correspond to the symmetries of the
/// regular simplices.
#[test]
fn a() {
let mut order = 2;
for n in 2..=6 {
order *= n + 1;
test(cox!(3; n - 1), order, order / 2, &format!("A{}", n))
}
}
/// Tests the ±A*n* symmetries, which correspond to the symmetries of the
/// compound of two simplices.
#[test]
fn pm_an() {
let mut order = 4;
for n in 2..=6 {
order *= n + 1;
test(
Group::matrix_product(cox!(3; n - 1), Group::central_inv(n)).unwrap(),
order,
order / 2,
&format!("±A{}", n),
)
}
}
/// Tests the BC*n* symmetries, which correspond to the symmetries of the
/// regular hypercube and orthoplex.
#[test]
fn bc() {
let mut order = 2;
for n in 2..=6 {
// A better cox! macro would make this unnecessary.
let mut cox = vec![3.0; n - 1];
cox[0] = 4.0;
let cox = CoxMatrix::from_lin_diagram(cox);
order *= n * 2;
test(
Group::cox_group(cox).unwrap(),
order,
order / 2,
&format!("BC{}", n),
)
}
}
/// Tests the H*n* symmetries, which correspond to the symmetries of a
/// regular dodecahedron and a regular hecatonicosachoron.
#[test]
fn h() {
test(cox!(5, 3), 120, 60, &"H3");
test(cox!(5, 3, 3), 14400, 7200, &"H4");
}
/// Tests the E6 symmetry group.
#[test]
fn e6() {
// In the future, we'll have better code for this, I hope.
let e6 = Group::cox_group(CoxMatrix(Matrix::from_data(VecStorage::new(
Dynamic::new(6),
Dynamic::new(6),
vec![
1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0,
2.0, 3.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0,
3.0, 2.0, 2.0, 1.0,
],
))))
.unwrap();
test(e6, 51840, 25920, &"E6");
}
#[test]
/// Tests the direct product of A3 with itself.
fn a3xa3() {
let a3 = cox!(3, 3);
let g = Group::direct_product(a3.clone(), a3.clone());
test(g, 576, 288, &"A3×A3");
}
#[test]
/// Tests the wreath product of A3 with A1.
fn a3_wr_a1() {
test(Group::wreath(cox!(3, 3), cox!()), 1152, 576, &"A3 ≀ A1");
}
#[test]
/// Tests out some step prisms.
fn step() {
for n in 1..10 {
for d in 1..n {
test(
Group::step(cox!(n).rotations(), move |mat| mat.pow(d).unwrap()),
n,
n,
"Step prismatic n-d",
);
}
}
}
}
| {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
} | identifier_body |
group.rs | //! Contains methods to generate many symmetry groups.
// Circumvents rust-analyzer bug.
#[allow(unused_imports)]
use crate::{cox, EPS};
use super::{convex, cox::CoxMatrix, geometry::Point, Concrete};
use approx::{abs_diff_ne, relative_eq};
use nalgebra::{
storage::Storage, DMatrix as Matrix, DVector as Vector, Dim, Dynamic, Quaternion, VecStorage,
U1,
};
use std::{
collections::{BTreeMap, BTreeSet, VecDeque},
f64::consts::PI,
};
/// Converts a 3D rotation matrix into a quaternion. Uses the code from
/// [Day (2015)](https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf).
fn mat_to_quat(mat: Matrix<f64>) -> Quaternion<f64> {
debug_assert!(
relative_eq!(mat.determinant(), 1.0, epsilon = EPS),
"Only matrices with determinant 1 can be turned into quaternions."
);
let t;
let q;
if mat[(2, 2)] < 0.0 {
if mat[(0, 0)] > mat[(1, 1)] {
t = 1.0 + mat[(0, 0)] - mat[(1, 1)] - mat[(2, 2)];
q = Quaternion::new(
t,
mat[(0, 1)] + mat[(1, 0)],
mat[(2, 0)] + mat[(0, 2)],
mat[(1, 2)] - mat[(2, 1)],
);
} else {
t = 1.0 - mat[(0, 0)] + mat[(1, 1)] - mat[(2, 2)];
q = Quaternion::new(
mat[(0, 1)] + mat[(1, 0)],
t,
mat[(1, 2)] + mat[(2, 1)],
mat[(2, 0)] - mat[(0, 2)],
);
}
} else if mat[(0, 0)] < -mat[(1, 1)] {
t = 1.0 - mat[(0, 0)] - mat[(1, 1)] + mat[(2, 2)];
q = Quaternion::new(
mat[(2, 0)] + mat[(0, 2)],
mat[(1, 2)] + mat[(2, 1)],
t,
mat[(0, 1)] - mat[(1, 0)],
);
} else {
t = 1.0 + mat[(0, 0)] + mat[(1, 1)] + mat[(2, 2)];
q = Quaternion::new(
mat[(1, 2)] - mat[(2, 1)],
mat[(2, 0)] - mat[(0, 2)],
mat[(0, 1)] - mat[(1, 0)],
t,
);
}
q * 0.5 / t.sqrt()
}
/// Converts a quaternion into a matrix, depending on whether it's a left or
/// right quaternion multiplication.
fn quat_to_mat(q: Quaternion<f64>, left: bool) -> Matrix<f64> {
let size = Dynamic::new(4);
let left = if left { 1.0 } else { -1.0 };
Matrix::from_data(VecStorage::new(
size,
size,
vec![
q.w,
q.i,
q.j,
q.k,
-q.i,
q.w,
left * q.k,
-left * q.j,
-q.j,
-left * q.k,
q.w,
left * q.i,
-q.k,
left * q.j,
-left * q.i,
q.w,
],
))
}
/// Computes the [direct sum](https://en.wikipedia.org/wiki/Block_matrix#Direct_sum)
/// of two matrices.
fn direct_sum(mat1: Matrix<f64>, mat2: Matrix<f64>) -> Matrix<f64> {
let dim1 = mat1.nrows();
let dim = dim1 + mat2.nrows();
Matrix::from_fn(dim, dim, |i, j| {
if i < dim1 {
if j < dim1 {
mat1[(i, j)]
} else {
0.0
}
} else if j >= dim1 {
mat2[(i - dim1, j - dim1)]
} else {
0.0
}
})
}
/// An iterator such that `dyn` objects using it can be cloned. Used to get
/// around orphan rules.
trait GroupIter: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone {}
impl<T: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone> GroupIter for T {}
dyn_clone::clone_trait_object!(GroupIter);
/// A [group](https://en.wikipedia.org/wiki/Group_(mathematics)) of matrices,
/// acting on a space of a certain dimension.
#[derive(Clone)]
pub struct Group {
/// The dimension of the matrices of the group. Stored separately so that
/// the iterator doesn't have to be peekable.
dim: usize,
/// The underlying iterator, which actually outputs the matrices.
iter: Box<dyn GroupIter>,
}
impl Iterator for Group {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
impl Group {
/// Gets all of the elements of the group. Consumes the iterator.
pub fn elements(self) -> Vec<Matrix<f64>> {
self.collect()
}
/// Gets the number of elements of the group. Consumes the iterators.
pub fn order(self) -> usize {
self.count()
}
pub fn from_gens(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
Self {
dim,
iter: Box::new(GenIter::new(dim, gens)),
}
}
/// Buils the rotation subgroup of a group.
pub fn rotations(self) -> Self {
// The determinant might not be exactly 1, so we're extra lenient and
// just test for positive determinants.
Self {
dim: self.dim,
iter: Box::new(self.filter(|el| el.determinant() > 0.0)),
}
}
/// Builds an iterator over the set of either left or a right quaternions
/// from a 3D group. **These won't actually generate a group,** as they
/// don't contain central inversion.
fn quaternions(self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
}
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<R: Dim, C: Dim> PartialEq for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn eq(&self, other: &Self) -> bool {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
}
impl<R: Dim, C: Dim> Eq for OrdMatrixMN<R, C> where VecStorage<f64, R, C>: Storage<f64, R, C> {}
impl<R: Dim, C: Dim> PartialOrd for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return x.partial_cmp(y);
}
}
Some(std::cmp::Ordering::Equal)
}
}
impl<R: Dim, C: Dim> Ord for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
impl<R: Dim, C: Dim> OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
pub fn new(mat: MatrixMN<R, C>) -> Self {
Self(mat)
}
}
type OrdMatrix = OrdMatrixMN<Dynamic, Dynamic>;
type OrdPoint = OrdMatrixMN<Dynamic, U1>;
/// An iterator for a `Group` [generated](https://en.wikipedia.org/wiki/Generator_(mathematics))
/// by a set of floating point matrices. Its elements are built in a BFS order.
/// It contains a lookup table, used to figure out whether an element has
/// already been found or not, as well as a queue to store the next elements.
#[derive(Clone)]
pub struct GenIter {
/// The number of dimensions the group acts on.
pub dim: usize,
/// The generators for the group.
pub gens: Vec<Matrix<f64>>,
/// Stores the elements that have been generated and that can still be
/// generated again. Is integral for the algorithm to work, as without it,
/// duplicate group elements will just keep generating forever.
elements: BTreeMap<OrdMatrix, usize>,
/// Stores the elements that haven't yet been processed.
queue: VecDeque<OrdMatrix>,
/// Stores the index in (`generators`)[GenGroup.generators] of the generator
/// that's being checked. All previous once will have already been
/// multiplied to the right of the current element. Quirk of the current
/// data structure, subject to change.
gen_idx: usize,
}
impl Iterator for GenIter {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.try_next() {
GroupNext::None => return None,
GroupNext::Repeat => {}
GroupNext::New(el) => return Some(el),
};
}
}
}
/// Determines whether two matrices are "approximately equal" elementwise.
fn matrix_approx(mat1: &Matrix<f64>, mat2: &Matrix<f64>) -> bool {
const EPS: f64 = 1e-4;
let mat1 = mat1.iter();
let mut mat2 = mat2.iter();
for x in mat1 {
let y = mat2.next().expect("Matrices don't have the same size!");
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
/// Builds a reflection matrix from a given vector.
pub fn refl_mat(n: Vector<f64>) -> Matrix<f64> {
let dim = n.nrows(); | let nn = n.norm_squared();
// Reflects every basis vector, builds a matrix from all of their images.
Matrix::from_columns(
&Matrix::identity(dim, dim)
.column_iter()
.map(|v| v - (2.0 * v.dot(&n) / nn) * &n)
.collect::<Vec<_>>(),
)
}
impl GenIter {
/// Builds a new group from a set of generators.
fn new(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
// Initializes the queue with only the identity matrix.
let mut queue = VecDeque::new();
queue.push_back(OrdMatrix::new(Matrix::identity(dim, dim)));
// We say that the identity has been found zero times. This is a special
// case that ensures that neither the identity is queued nor found
// twice.
let mut elements = BTreeMap::new();
elements.insert(OrdMatrix::new(Matrix::identity(dim, dim)), 0);
Self {
dim,
gens,
elements,
queue,
gen_idx: 0,
}
}
/// Inserts a new element into the group. Returns whether the element is new.
fn insert(&mut self, el: Matrix<f64>) -> bool {
let el = OrdMatrix::new(el);
// If the element has been found before.
if let Some(value) = self.elements.insert(el.clone(), 1) {
// Bumps the value by 1, or removes the element if this is the last
// time we'll find the element.
if value != self.gens.len() - 1 {
self.elements.insert(el, value + 1);
} else {
self.elements.remove(&el);
}
// The element is a repeat, except in the special case of the
// identity.
value == 0
}
// If the element is new, we add it to the queue as well.
else {
self.queue.push_back(el);
true
}
}
/// Gets the next element and the next generator to attempt to multiply.
/// Advances the iterator.
fn next_el_gen(&mut self) -> Option<[Matrix<f64>; 2]> {
let el = self.queue.front()?.0.clone();
let gen = self.gens[self.gen_idx].clone();
// Advances the indices.
self.gen_idx += 1;
if self.gen_idx == self.gens.len() {
self.gen_idx = 0;
self.queue.pop_front();
}
Some([el, gen])
}
/// Multiplies the current element times the current generator, determines
/// if it is a new element. Advances the iterator.
fn try_next(&mut self) -> GroupNext {
// If there's a next element and generator.
if let Some([el, gen]) = self.next_el_gen() {
let new_el = el * gen;
// If the group element is new.
if self.insert(new_el.clone()) {
GroupNext::New(new_el)
}
// If we found a repeat.
else {
GroupNext::Repeat
}
}
// If we already went through the entire group.
else {
GroupNext::None
}
}
pub fn from_cox_mat(cox: CoxMatrix) -> Option<Self> {
const EPS: f64 = 1e-6;
let dim = cox.nrows();
let mut generators = Vec::with_capacity(dim);
// Builds each generator from the top down as a triangular matrix, so
// that the dot products match the values in the Coxeter matrix.
for i in 0..dim {
let mut gen_i = Vector::from_element(dim, 0.0);
for (j, gen_j) in generators.iter().enumerate() {
let dot = gen_i.dot(gen_j);
gen_i[j] = ((PI / cox[(i, j)] as f64).cos() - dot) / gen_j[j];
}
// The vector doesn't fit in spherical space.
let norm_sq = gen_i.norm_squared();
if norm_sq >= 1.0 - EPS {
return None;
} else {
gen_i[i] = (1.0 - norm_sq).sqrt();
}
generators.push(gen_i);
}
Some(Self::new(
dim,
generators.into_iter().map(refl_mat).collect(),
))
}
}
#[cfg(test)]
mod tests {
use gcd::Gcd;
use super::*;
/// Tests a given symmetry group.
fn test(group: Group, order: usize, rot_order: usize, name: &str) {
// Makes testing multiple derived groups faster.
let group = group.cache().debug();
// Tests the order of the group.
assert_eq!(
group.clone().order(),
order,
"{} does not have the expected order.",
name
);
// Tests the order of the rotational subgroup.
assert_eq!(
group.rotations().order(),
rot_order,
"The rotational group of {} does not have the expected order.",
name
);
}
/// Tests the trivial group in various dimensions.
#[test]
fn i() {
for n in 1..=10 {
test(Group::trivial(n), 1, 1, &format!("I^{}", n))
}
}
/// Tests the group consisting of the identity and a central inversion in
/// various dimensions.
#[test]
fn pm_i() {
for n in 1..=10 {
test(
Group::central_inv(n),
2,
(n + 1) % 2 + 1,
&format!("±I{}", n),
)
}
}
/// Tests the I2(*n*) symmetries, which correspond to the symmetries of a
/// regular *n*-gon.
#[test]
fn i2() {
for n in 2..=10 {
for d in 1..n {
if n.gcd(d) != 1 {
continue;
}
test(cox!(n as f64 / d as f64), 2 * n, n, &format!("I2({})", n));
}
}
}
/// Tests the A3⁺ @ (I2(*n*) × I) symmetries, the tetrahedron swirl
/// symmetries.
#[test]
fn a3_p_swirl_i2xi_p() {
for n in 2..10 {
let order = 24 * n;
test(
Group::swirl(
cox!(3.0, 3.0),
Group::direct_product(cox!(n), Group::trivial(1)),
),
order,
order,
&format!("A3⁺ @ (I2({}) × I)", n),
)
}
}
/// Tests the A*n* symmetries, which correspond to the symmetries of the
/// regular simplices.
#[test]
fn a() {
let mut order = 2;
for n in 2..=6 {
order *= n + 1;
test(cox!(3; n - 1), order, order / 2, &format!("A{}", n))
}
}
/// Tests the ±A*n* symmetries, which correspond to the symmetries of the
/// compound of two simplices.
#[test]
fn pm_an() {
let mut order = 4;
for n in 2..=6 {
order *= n + 1;
test(
Group::matrix_product(cox!(3; n - 1), Group::central_inv(n)).unwrap(),
order,
order / 2,
&format!("±A{}", n),
)
}
}
/// Tests the BC*n* symmetries, which correspond to the symmetries of the
/// regular hypercube and orthoplex.
#[test]
fn bc() {
let mut order = 2;
for n in 2..=6 {
// A better cox! macro would make this unnecessary.
let mut cox = vec![3.0; n - 1];
cox[0] = 4.0;
let cox = CoxMatrix::from_lin_diagram(cox);
order *= n * 2;
test(
Group::cox_group(cox).unwrap(),
order,
order / 2,
&format!("BC{}", n),
)
}
}
/// Tests the H*n* symmetries, which correspond to the symmetries of a
/// regular dodecahedron and a regular hecatonicosachoron.
#[test]
fn h() {
test(cox!(5, 3), 120, 60, &"H3");
test(cox!(5, 3, 3), 14400, 7200, &"H4");
}
/// Tests the E6 symmetry group.
#[test]
fn e6() {
// In the future, we'll have better code for this, I hope.
let e6 = Group::cox_group(CoxMatrix(Matrix::from_data(VecStorage::new(
Dynamic::new(6),
Dynamic::new(6),
vec![
1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0,
2.0, 3.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0,
3.0, 2.0, 2.0, 1.0,
],
))))
.unwrap();
test(e6, 51840, 25920, &"E6");
}
#[test]
/// Tests the direct product of A3 with itself.
fn a3xa3() {
let a3 = cox!(3, 3);
let g = Group::direct_product(a3.clone(), a3.clone());
test(g, 576, 288, &"A3×A3");
}
#[test]
/// Tests the wreath product of A3 with A1.
fn a3_wr_a1() {
test(Group::wreath(cox!(3, 3), cox!()), 1152, 576, &"A3 ≀ A1");
}
#[test]
/// Tests out some step prisms.
fn step() {
for n in 1..10 {
for d in 1..n {
test(
Group::step(cox!(n).rotations(), move |mat| mat.pow(d).unwrap()),
n,
n,
"Step prismatic n-d",
);
}
}
}
} | random_line_split | |
group.rs | //! Contains methods to generate many symmetry groups.
// Circumvents rust-analyzer bug.
#[allow(unused_imports)]
use crate::{cox, EPS};
use super::{convex, cox::CoxMatrix, geometry::Point, Concrete};
use approx::{abs_diff_ne, relative_eq};
use nalgebra::{
storage::Storage, DMatrix as Matrix, DVector as Vector, Dim, Dynamic, Quaternion, VecStorage,
U1,
};
use std::{
collections::{BTreeMap, BTreeSet, VecDeque},
f64::consts::PI,
};
/// Converts a 3D rotation matrix into a quaternion. Uses the code from
/// [Day (2015)](https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf).
fn mat_to_quat(mat: Matrix<f64>) -> Quaternion<f64> {
debug_assert!(
relative_eq!(mat.determinant(), 1.0, epsilon = EPS),
"Only matrices with determinant 1 can be turned into quaternions."
);
let t;
let q;
if mat[(2, 2)] < 0.0 {
if mat[(0, 0)] > mat[(1, 1)] {
t = 1.0 + mat[(0, 0)] - mat[(1, 1)] - mat[(2, 2)];
q = Quaternion::new(
t,
mat[(0, 1)] + mat[(1, 0)],
mat[(2, 0)] + mat[(0, 2)],
mat[(1, 2)] - mat[(2, 1)],
);
} else {
t = 1.0 - mat[(0, 0)] + mat[(1, 1)] - mat[(2, 2)];
q = Quaternion::new(
mat[(0, 1)] + mat[(1, 0)],
t,
mat[(1, 2)] + mat[(2, 1)],
mat[(2, 0)] - mat[(0, 2)],
);
}
} else if mat[(0, 0)] < -mat[(1, 1)] {
t = 1.0 - mat[(0, 0)] - mat[(1, 1)] + mat[(2, 2)];
q = Quaternion::new(
mat[(2, 0)] + mat[(0, 2)],
mat[(1, 2)] + mat[(2, 1)],
t,
mat[(0, 1)] - mat[(1, 0)],
);
} else {
t = 1.0 + mat[(0, 0)] + mat[(1, 1)] + mat[(2, 2)];
q = Quaternion::new(
mat[(1, 2)] - mat[(2, 1)],
mat[(2, 0)] - mat[(0, 2)],
mat[(0, 1)] - mat[(1, 0)],
t,
);
}
q * 0.5 / t.sqrt()
}
/// Converts a quaternion into a matrix, depending on whether it's a left or
/// right quaternion multiplication.
fn quat_to_mat(q: Quaternion<f64>, left: bool) -> Matrix<f64> {
let size = Dynamic::new(4);
let left = if left { 1.0 } else { -1.0 };
Matrix::from_data(VecStorage::new(
size,
size,
vec![
q.w,
q.i,
q.j,
q.k,
-q.i,
q.w,
left * q.k,
-left * q.j,
-q.j,
-left * q.k,
q.w,
left * q.i,
-q.k,
left * q.j,
-left * q.i,
q.w,
],
))
}
/// Computes the [direct sum](https://en.wikipedia.org/wiki/Block_matrix#Direct_sum)
/// of two matrices.
fn direct_sum(mat1: Matrix<f64>, mat2: Matrix<f64>) -> Matrix<f64> {
let dim1 = mat1.nrows();
let dim = dim1 + mat2.nrows();
Matrix::from_fn(dim, dim, |i, j| {
if i < dim1 {
if j < dim1 {
mat1[(i, j)]
} else {
0.0
}
} else if j >= dim1 {
mat2[(i - dim1, j - dim1)]
} else {
0.0
}
})
}
/// An iterator such that `dyn` objects using it can be cloned. Used to get
/// around orphan rules.
trait GroupIter: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone {}
impl<T: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone> GroupIter for T {}
dyn_clone::clone_trait_object!(GroupIter);
/// A [group](https://en.wikipedia.org/wiki/Group_(mathematics)) of matrices,
/// acting on a space of a certain dimension.
#[derive(Clone)]
pub struct Group {
/// The dimension of the matrices of the group. Stored separately so that
/// the iterator doesn't have to be peekable.
dim: usize,
/// The underlying iterator, which actually outputs the matrices.
iter: Box<dyn GroupIter>,
}
impl Iterator for Group {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
impl Group {
/// Gets all of the elements of the group. Consumes the iterator.
pub fn elements(self) -> Vec<Matrix<f64>> {
self.collect()
}
/// Gets the number of elements of the group. Consumes the iterators.
pub fn order(self) -> usize {
self.count()
}
pub fn from_gens(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
Self {
dim,
iter: Box::new(GenIter::new(dim, gens)),
}
}
/// Buils the rotation subgroup of a group.
pub fn rotations(self) -> Self {
// The determinant might not be exactly 1, so we're extra lenient and
// just test for positive determinants.
Self {
dim: self.dim,
iter: Box::new(self.filter(|el| el.determinant() > 0.0)),
}
}
/// Builds an iterator over the set of either left or a right quaternions
/// from a 3D group. **These won't actually generate a group,** as they
/// don't contain central inversion.
fn | (self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
}
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<R: Dim, C: Dim> PartialEq for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn eq(&self, other: &Self) -> bool {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
}
impl<R: Dim, C: Dim> Eq for OrdMatrixMN<R, C> where VecStorage<f64, R, C>: Storage<f64, R, C> {}
impl<R: Dim, C: Dim> PartialOrd for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return x.partial_cmp(y);
}
}
Some(std::cmp::Ordering::Equal)
}
}
impl<R: Dim, C: Dim> Ord for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
impl<R: Dim, C: Dim> OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
pub fn new(mat: MatrixMN<R, C>) -> Self {
Self(mat)
}
}
type OrdMatrix = OrdMatrixMN<Dynamic, Dynamic>;
type OrdPoint = OrdMatrixMN<Dynamic, U1>;
/// An iterator for a `Group` [generated](https://en.wikipedia.org/wiki/Generator_(mathematics))
/// by a set of floating point matrices. Its elements are built in a BFS order.
/// It contains a lookup table, used to figure out whether an element has
/// already been found or not, as well as a queue to store the next elements.
#[derive(Clone)]
pub struct GenIter {
/// The number of dimensions the group acts on.
pub dim: usize,
/// The generators for the group.
pub gens: Vec<Matrix<f64>>,
/// Stores the elements that have been generated and that can still be
/// generated again. Is integral for the algorithm to work, as without it,
/// duplicate group elements will just keep generating forever.
elements: BTreeMap<OrdMatrix, usize>,
/// Stores the elements that haven't yet been processed.
queue: VecDeque<OrdMatrix>,
/// Stores the index in (`generators`)[GenGroup.generators] of the generator
/// that's being checked. All previous once will have already been
/// multiplied to the right of the current element. Quirk of the current
/// data structure, subject to change.
gen_idx: usize,
}
impl Iterator for GenIter {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.try_next() {
GroupNext::None => return None,
GroupNext::Repeat => {}
GroupNext::New(el) => return Some(el),
};
}
}
}
/// Determines whether two matrices are "approximately equal" elementwise.
fn matrix_approx(mat1: &Matrix<f64>, mat2: &Matrix<f64>) -> bool {
const EPS: f64 = 1e-4;
let mat1 = mat1.iter();
let mut mat2 = mat2.iter();
for x in mat1 {
let y = mat2.next().expect("Matrices don't have the same size!");
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
/// Builds a reflection matrix from a given vector.
pub fn refl_mat(n: Vector<f64>) -> Matrix<f64> {
let dim = n.nrows();
let nn = n.norm_squared();
// Reflects every basis vector, builds a matrix from all of their images.
Matrix::from_columns(
&Matrix::identity(dim, dim)
.column_iter()
.map(|v| v - (2.0 * v.dot(&n) / nn) * &n)
.collect::<Vec<_>>(),
)
}
impl GenIter {
/// Builds a new group from a set of generators.
fn new(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
// Initializes the queue with only the identity matrix.
let mut queue = VecDeque::new();
queue.push_back(OrdMatrix::new(Matrix::identity(dim, dim)));
// We say that the identity has been found zero times. This is a special
// case that ensures that neither the identity is queued nor found
// twice.
let mut elements = BTreeMap::new();
elements.insert(OrdMatrix::new(Matrix::identity(dim, dim)), 0);
Self {
dim,
gens,
elements,
queue,
gen_idx: 0,
}
}
/// Inserts a new element into the group. Returns whether the element is new.
fn insert(&mut self, el: Matrix<f64>) -> bool {
let el = OrdMatrix::new(el);
// If the element has been found before.
if let Some(value) = self.elements.insert(el.clone(), 1) {
// Bumps the value by 1, or removes the element if this is the last
// time we'll find the element.
if value != self.gens.len() - 1 {
self.elements.insert(el, value + 1);
} else {
self.elements.remove(&el);
}
// The element is a repeat, except in the special case of the
// identity.
value == 0
}
// If the element is new, we add it to the queue as well.
else {
self.queue.push_back(el);
true
}
}
/// Gets the next element and the next generator to attempt to multiply.
/// Advances the iterator.
fn next_el_gen(&mut self) -> Option<[Matrix<f64>; 2]> {
let el = self.queue.front()?.0.clone();
let gen = self.gens[self.gen_idx].clone();
// Advances the indices.
self.gen_idx += 1;
if self.gen_idx == self.gens.len() {
self.gen_idx = 0;
self.queue.pop_front();
}
Some([el, gen])
}
/// Multiplies the current element times the current generator, determines
/// if it is a new element. Advances the iterator.
fn try_next(&mut self) -> GroupNext {
// If there's a next element and generator.
if let Some([el, gen]) = self.next_el_gen() {
let new_el = el * gen;
// If the group element is new.
if self.insert(new_el.clone()) {
GroupNext::New(new_el)
}
// If we found a repeat.
else {
GroupNext::Repeat
}
}
// If we already went through the entire group.
else {
GroupNext::None
}
}
pub fn from_cox_mat(cox: CoxMatrix) -> Option<Self> {
const EPS: f64 = 1e-6;
let dim = cox.nrows();
let mut generators = Vec::with_capacity(dim);
// Builds each generator from the top down as a triangular matrix, so
// that the dot products match the values in the Coxeter matrix.
for i in 0..dim {
let mut gen_i = Vector::from_element(dim, 0.0);
for (j, gen_j) in generators.iter().enumerate() {
let dot = gen_i.dot(gen_j);
gen_i[j] = ((PI / cox[(i, j)] as f64).cos() - dot) / gen_j[j];
}
// The vector doesn't fit in spherical space.
let norm_sq = gen_i.norm_squared();
if norm_sq >= 1.0 - EPS {
return None;
} else {
gen_i[i] = (1.0 - norm_sq).sqrt();
}
generators.push(gen_i);
}
Some(Self::new(
dim,
generators.into_iter().map(refl_mat).collect(),
))
}
}
#[cfg(test)]
mod tests {
use gcd::Gcd;
use super::*;
/// Tests a given symmetry group.
fn test(group: Group, order: usize, rot_order: usize, name: &str) {
// Makes testing multiple derived groups faster.
let group = group.cache().debug();
// Tests the order of the group.
assert_eq!(
group.clone().order(),
order,
"{} does not have the expected order.",
name
);
// Tests the order of the rotational subgroup.
assert_eq!(
group.rotations().order(),
rot_order,
"The rotational group of {} does not have the expected order.",
name
);
}
/// Tests the trivial group in various dimensions.
#[test]
fn i() {
for n in 1..=10 {
test(Group::trivial(n), 1, 1, &format!("I^{}", n))
}
}
/// Tests the group consisting of the identity and a central inversion in
/// various dimensions.
#[test]
fn pm_i() {
for n in 1..=10 {
test(
Group::central_inv(n),
2,
(n + 1) % 2 + 1,
&format!("±I{}", n),
)
}
}
/// Tests the I2(*n*) symmetries, which correspond to the symmetries of a
/// regular *n*-gon.
#[test]
fn i2() {
for n in 2..=10 {
for d in 1..n {
if n.gcd(d) != 1 {
continue;
}
test(cox!(n as f64 / d as f64), 2 * n, n, &format!("I2({})", n));
}
}
}
/// Tests the A3⁺ @ (I2(*n*) × I) symmetries, the tetrahedron swirl
/// symmetries.
#[test]
fn a3_p_swirl_i2xi_p() {
for n in 2..10 {
let order = 24 * n;
test(
Group::swirl(
cox!(3.0, 3.0),
Group::direct_product(cox!(n), Group::trivial(1)),
),
order,
order,
&format!("A3⁺ @ (I2({}) × I)", n),
)
}
}
/// Tests the A*n* symmetries, which correspond to the symmetries of the
/// regular simplices.
#[test]
fn a() {
let mut order = 2;
for n in 2..=6 {
order *= n + 1;
test(cox!(3; n - 1), order, order / 2, &format!("A{}", n))
}
}
/// Tests the ±A*n* symmetries, which correspond to the symmetries of the
/// compound of two simplices.
#[test]
fn pm_an() {
let mut order = 4;
for n in 2..=6 {
order *= n + 1;
test(
Group::matrix_product(cox!(3; n - 1), Group::central_inv(n)).unwrap(),
order,
order / 2,
&format!("±A{}", n),
)
}
}
/// Tests the BC*n* symmetries, which correspond to the symmetries of the
/// regular hypercube and orthoplex.
#[test]
fn bc() {
let mut order = 2;
for n in 2..=6 {
// A better cox! macro would make this unnecessary.
let mut cox = vec![3.0; n - 1];
cox[0] = 4.0;
let cox = CoxMatrix::from_lin_diagram(cox);
order *= n * 2;
test(
Group::cox_group(cox).unwrap(),
order,
order / 2,
&format!("BC{}", n),
)
}
}
/// Tests the H*n* symmetries, which correspond to the symmetries of a
/// regular dodecahedron and a regular hecatonicosachoron.
#[test]
fn h() {
test(cox!(5, 3), 120, 60, &"H3");
test(cox!(5, 3, 3), 14400, 7200, &"H4");
}
/// Tests the E6 symmetry group.
#[test]
fn e6() {
// In the future, we'll have better code for this, I hope.
let e6 = Group::cox_group(CoxMatrix(Matrix::from_data(VecStorage::new(
Dynamic::new(6),
Dynamic::new(6),
vec![
1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0,
2.0, 3.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0,
3.0, 2.0, 2.0, 1.0,
],
))))
.unwrap();
test(e6, 51840, 25920, &"E6");
}
#[test]
/// Tests the direct product of A3 with itself.
fn a3xa3() {
let a3 = cox!(3, 3);
let g = Group::direct_product(a3.clone(), a3.clone());
test(g, 576, 288, &"A3×A3");
}
#[test]
/// Tests the wreath product of A3 with A1.
fn a3_wr_a1() {
test(Group::wreath(cox!(3, 3), cox!()), 1152, 576, &"A3 ≀ A1");
}
#[test]
/// Tests out some step prisms.
fn step() {
for n in 1..10 {
for d in 1..n {
test(
Group::step(cox!(n).rotations(), move |mat| mat.pow(d).unwrap()),
n,
n,
"Step prismatic n-d",
);
}
}
}
}
| quaternions | identifier_name |
connection.py | # SPDX-License-Identifier: Apache-2.0
"""
Provides managed HTTP session to Subaru Starlink mobile app API.
For more details, please refer to the documentation at https://github.com/G-Two/subarulink
"""
import asyncio
import logging
import pprint
import time
import aiohttp
from yarl import URL
from subarulink.exceptions import (
IncompleteCredentials,
InvalidCredentials,
SubaruException,
)
from ._subaru_api.const import (
API_2FA_AUTH_VERIFY,
API_2FA_CONTACT,
API_2FA_SEND_VERIFICATION,
API_ERROR_INVALID_ACCOUNT,
API_ERROR_INVALID_CREDENTIALS,
API_ERROR_PASSWORD_WARNING,
API_ERROR_VEHICLE_SETUP,
API_LOGIN,
API_MOBILE_APP,
API_SELECT_VEHICLE,
API_SERVER,
API_VALIDATE_SESSION,
API_VERSION,
)
_LOGGER = logging.getLogger(__name__)
GET = "get"
POST = "post"
class Connection:
"""A managed HTTP session to Subaru Starlink mobile app API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
}
self._websession = websession
self._authenticated = False
self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def reset_session(self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)
async def _authenticate(self, vin=None) -> bool:
|
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning("VEHICLESETUPERROR received. Resetting session.")
self.reset_session()
return False
_LOGGER.debug("Failed to switch vehicle errorCode=%s", js_resp.get("errorCode"))
# Something else is probably wrong with the backend server context - try resetting
self.reset_session()
raise SubaruException("Failed to switch vehicle %s - resetting session." % js_resp.get("errorCode"))
async def _get_vehicle_data(self):
for vin in self._list_of_vins:
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
self._vehicles.append(js_resp["data"])
self._current_vin = vin
async def _get_contact_methods(self):
js_resp = await self.__open(API_2FA_CONTACT, POST)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
self._auth_contact_options = js_resp.get("data")
async def __open(
self,
url,
method=GET,
headers=None,
data=None,
json_data=None,
params=None,
baseurl="",
):
"""Open url."""
if not baseurl:
baseurl = f"https://{API_SERVER[self._country]}{API_VERSION}"
url: URL = URL(baseurl + url)
_LOGGER.debug("%s: %s, params=%s, json_data=%s", method.upper(), url, params, json_data)
async with self._lock:
try:
resp = await getattr(self._websession, method)(
url, headers=headers, params=params, data=data, json=json_data
)
if resp.status > 299:
raise SubaruException("HTTP %d: %s %s" % (resp.status, await resp.text(), resp.request_info))
js_resp = await resp.json()
if "success" not in js_resp and "serviceType" not in js_resp:
raise SubaruException("Unexpected response: %s" % resp)
return js_resp
except aiohttp.ClientResponseError as err:
raise SubaruException(err.status) from err
except aiohttp.ClientConnectionError as err:
raise SubaruException("aiohttp.ClientConnectionError") from err
| """Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]]
self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.") | identifier_body |
connection.py | # SPDX-License-Identifier: Apache-2.0
"""
Provides managed HTTP session to Subaru Starlink mobile app API.
For more details, please refer to the documentation at https://github.com/G-Two/subarulink
"""
import asyncio
import logging
import pprint
import time
import aiohttp
from yarl import URL
from subarulink.exceptions import (
IncompleteCredentials,
InvalidCredentials,
SubaruException,
)
from ._subaru_api.const import (
API_2FA_AUTH_VERIFY,
API_2FA_CONTACT,
API_2FA_SEND_VERIFICATION,
API_ERROR_INVALID_ACCOUNT,
API_ERROR_INVALID_CREDENTIALS,
API_ERROR_PASSWORD_WARNING,
API_ERROR_VEHICLE_SETUP,
API_LOGIN,
API_MOBILE_APP,
API_SELECT_VEHICLE,
API_SERVER,
API_VALIDATE_SESSION,
API_VERSION,
)
_LOGGER = logging.getLogger(__name__)
GET = "get"
POST = "post"
class Connection:
"""A managed HTTP session to Subaru Starlink mobile app API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
}
self._websession = websession
self._authenticated = False
self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def reset_session(self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
|
async def _authenticate(self, vin=None) -> bool:
"""Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]]
self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.")
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning("VEHICLESETUPERROR received. Resetting session.")
self.reset_session()
return False
_LOGGER.debug("Failed to switch vehicle errorCode=%s", js_resp.get("errorCode"))
# Something else is probably wrong with the backend server context - try resetting
self.reset_session()
raise SubaruException("Failed to switch vehicle %s - resetting session." % js_resp.get("errorCode"))
async def _get_vehicle_data(self):
for vin in self._list_of_vins:
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
self._vehicles.append(js_resp["data"])
self._current_vin = vin
async def _get_contact_methods(self):
js_resp = await self.__open(API_2FA_CONTACT, POST)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
self._auth_contact_options = js_resp.get("data")
async def __open(
self,
url,
method=GET,
headers=None,
data=None,
json_data=None,
params=None,
baseurl="",
):
"""Open url."""
if not baseurl:
baseurl = f"https://{API_SERVER[self._country]}{API_VERSION}"
url: URL = URL(baseurl + url)
_LOGGER.debug("%s: %s, params=%s, json_data=%s", method.upper(), url, params, json_data)
async with self._lock:
try:
resp = await getattr(self._websession, method)(
url, headers=headers, params=params, data=data, json=json_data
)
if resp.status > 299:
raise SubaruException("HTTP %d: %s %s" % (resp.status, await resp.text(), resp.request_info))
js_resp = await resp.json()
if "success" not in js_resp and "serviceType" not in js_resp:
raise SubaruException("Unexpected response: %s" % resp)
return js_resp
except aiohttp.ClientResponseError as err:
raise SubaruException(err.status) from err
except aiohttp.ClientConnectionError as err:
raise SubaruException("aiohttp.ClientConnectionError") from err
| return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data) | conditional_block |
connection.py | # SPDX-License-Identifier: Apache-2.0
"""
Provides managed HTTP session to Subaru Starlink mobile app API.
For more details, please refer to the documentation at https://github.com/G-Two/subarulink
"""
import asyncio
import logging
import pprint
import time
import aiohttp
from yarl import URL
from subarulink.exceptions import (
IncompleteCredentials,
InvalidCredentials,
SubaruException,
)
from ._subaru_api.const import (
API_2FA_AUTH_VERIFY,
API_2FA_CONTACT,
API_2FA_SEND_VERIFICATION,
API_ERROR_INVALID_ACCOUNT,
API_ERROR_INVALID_CREDENTIALS,
API_ERROR_PASSWORD_WARNING,
API_ERROR_VEHICLE_SETUP,
API_LOGIN,
API_MOBILE_APP,
API_SELECT_VEHICLE,
API_SERVER,
API_VALIDATE_SESSION,
API_VERSION,
)
_LOGGER = logging.getLogger(__name__)
GET = "get"
POST = "post"
class Connection:
"""A managed HTTP session to Subaru Starlink mobile app API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
}
self._websession = websession
self._authenticated = False
self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def | (self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)
async def _authenticate(self, vin=None) -> bool:
"""Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]]
self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.")
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning("VEHICLESETUPERROR received. Resetting session.")
self.reset_session()
return False
_LOGGER.debug("Failed to switch vehicle errorCode=%s", js_resp.get("errorCode"))
# Something else is probably wrong with the backend server context - try resetting
self.reset_session()
raise SubaruException("Failed to switch vehicle %s - resetting session." % js_resp.get("errorCode"))
async def _get_vehicle_data(self):
for vin in self._list_of_vins:
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
self._vehicles.append(js_resp["data"])
self._current_vin = vin
async def _get_contact_methods(self):
js_resp = await self.__open(API_2FA_CONTACT, POST)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
self._auth_contact_options = js_resp.get("data")
async def __open(
self,
url,
method=GET,
headers=None,
data=None,
json_data=None,
params=None,
baseurl="",
):
"""Open url."""
if not baseurl:
baseurl = f"https://{API_SERVER[self._country]}{API_VERSION}"
url: URL = URL(baseurl + url)
_LOGGER.debug("%s: %s, params=%s, json_data=%s", method.upper(), url, params, json_data)
async with self._lock:
try:
resp = await getattr(self._websession, method)(
url, headers=headers, params=params, data=data, json=json_data
)
if resp.status > 299:
raise SubaruException("HTTP %d: %s %s" % (resp.status, await resp.text(), resp.request_info))
js_resp = await resp.json()
if "success" not in js_resp and "serviceType" not in js_resp:
raise SubaruException("Unexpected response: %s" % resp)
return js_resp
except aiohttp.ClientResponseError as err:
raise SubaruException(err.status) from err
except aiohttp.ClientConnectionError as err:
raise SubaruException("aiohttp.ClientConnectionError") from err
| reset_session | identifier_name |
connection.py | # SPDX-License-Identifier: Apache-2.0
"""
Provides managed HTTP session to Subaru Starlink mobile app API.
For more details, please refer to the documentation at https://github.com/G-Two/subarulink
"""
import asyncio
import logging
import pprint
import time
import aiohttp
from yarl import URL
from subarulink.exceptions import (
IncompleteCredentials,
InvalidCredentials,
SubaruException,
)
from ._subaru_api.const import (
API_2FA_AUTH_VERIFY,
API_2FA_CONTACT,
API_2FA_SEND_VERIFICATION,
API_ERROR_INVALID_ACCOUNT,
API_ERROR_INVALID_CREDENTIALS,
API_ERROR_PASSWORD_WARNING,
API_ERROR_VEHICLE_SETUP,
API_LOGIN,
API_MOBILE_APP,
API_SELECT_VEHICLE,
API_SERVER,
API_VALIDATE_SESSION,
API_VERSION,
)
_LOGGER = logging.getLogger(__name__)
GET = "get"
POST = "post"
class Connection:
"""A managed HTTP session to Subaru Starlink mobile app API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate", | self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def reset_session(self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)
async def _authenticate(self, vin=None) -> bool:
"""Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]]
self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.")
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning("VEHICLESETUPERROR received. Resetting session.")
self.reset_session()
return False
_LOGGER.debug("Failed to switch vehicle errorCode=%s", js_resp.get("errorCode"))
# Something else is probably wrong with the backend server context - try resetting
self.reset_session()
raise SubaruException("Failed to switch vehicle %s - resetting session." % js_resp.get("errorCode"))
async def _get_vehicle_data(self):
for vin in self._list_of_vins:
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
self._vehicles.append(js_resp["data"])
self._current_vin = vin
async def _get_contact_methods(self):
js_resp = await self.__open(API_2FA_CONTACT, POST)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
self._auth_contact_options = js_resp.get("data")
async def __open(
self,
url,
method=GET,
headers=None,
data=None,
json_data=None,
params=None,
baseurl="",
):
"""Open url."""
if not baseurl:
baseurl = f"https://{API_SERVER[self._country]}{API_VERSION}"
url: URL = URL(baseurl + url)
_LOGGER.debug("%s: %s, params=%s, json_data=%s", method.upper(), url, params, json_data)
async with self._lock:
try:
resp = await getattr(self._websession, method)(
url, headers=headers, params=params, data=data, json=json_data
)
if resp.status > 299:
raise SubaruException("HTTP %d: %s %s" % (resp.status, await resp.text(), resp.request_info))
js_resp = await resp.json()
if "success" not in js_resp and "serviceType" not in js_resp:
raise SubaruException("Unexpected response: %s" % resp)
return js_resp
except aiohttp.ClientResponseError as err:
raise SubaruException(err.status) from err
except aiohttp.ClientConnectionError as err:
raise SubaruException("aiohttp.ClientConnectionError") from err | "Accept": "*/*",
}
self._websession = websession
self._authenticated = False | random_line_split |
main.go | package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"time"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/trace"
"golang.org/x/oauth2"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/google/go-github/github"
flags "github.com/jessevdk/go-flags"
"github.com/pivotal-cf/paraphernalia/operate/admin"
"github.com/pivotal-cf/paraphernalia/secure/tlsconfig"
"github.com/pivotal-cf/paraphernalia/serve/grpcrunner"
"github.com/robdimsdale/honeylager"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/grouper"
"github.com/tedsuo/ifrit/sigmon"
"cred-alert/config"
"cred-alert/crypto"
"cred-alert/db"
"cred-alert/db/migrations"
"cred-alert/gitclient"
"cred-alert/metrics"
"cred-alert/notifications"
"cred-alert/queue"
"cred-alert/revok"
"cred-alert/revok/api"
"cred-alert/revok/stats"
"cred-alert/revokpb"
"cred-alert/search"
"cred-alert/sniff"
"rolodex/rolodexpb"
)
var info = admin.ServiceInfo{
Name: "revok",
Description: "A service which fetches new Git commits and scans them for credentials.",
Team: "PCF Security Enablement",
}
func main() {
var cfg *config.WorkerConfig
var flagOpts config.WorkerOpts
var ghClient *revok.GitHubClient
logger := lager.NewLogger("revok-worker")
logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
logger.Info("starting")
_, err := flags.Parse(&flagOpts)
if err != nil {
os.Exit(1)
}
bs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))
if err != nil {
logger.Error("failed-to-open-config-file", err)
os.Exit(1)
}
cfg, err = config.LoadWorkerConfig(bs)
if err != nil {
logger.Error("failed-to-load-config-file", err)
os.Exit(1)
}
errs := cfg.Validate()
if errs != nil {
for _, err := range errs {
fmt.Println(err.Error())
}
os.Exit(1)
}
if cfg.Metrics.SentryDSN != "" {
logger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))
}
if cfg.Metrics.HoneycombWriteKey != "" && cfg.Metrics.Environment != "" {
s := honeylager.NewSink(cfg.Metrics.HoneycombWriteKey, cfg.Metrics.Environment, lager.DEBUG)
defer s.Close()
logger.RegisterSink(s)
}
workdir := cfg.WorkDir
_, err = os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor := queue.NewPushEventProcessor(
changeFetcher,
emitter,
clk,
traceClient,
)
signatureChecker := queue.NewSignatureCheck(crypto.NewRSAVerifier(publicKey), emitter, pushEventProcessor)
members = append(members, grouper.Member{
Name: "github-hint-handler",
Runner: queue.NewPubSubSubscriber(logger, subscription, signatureChecker, emitter),
})
if cfg.GitHub.AccessToken != "" {
repoDiscoverer := revok.NewRepoDiscoverer(
logger,
workdir,
cloneMsgCh,
ghClient,
clk,
cfg.RepositoryDiscovery.Interval,
cfg.RepositoryDiscovery.Organizations,
cfg.RepositoryDiscovery.Users,
repositoryRepository,
)
members = append(members, grouper.Member{
Name: "repo-discoverer",
Runner: repoDiscoverer,
})
}
startupTasks := []grouper.Member{
{
Name: "schedule-fetches",
Runner: changeScheduler,
},
}
system := []grouper.Member{
{
Name: "servers",
Runner: grouper.NewParallel(os.Interrupt, members),
},
{
Name: "startup-tasks",
Runner: grouper.NewParallel(os.Interrupt, startupTasks),
},
}
runner := sigmon.New(grouper.NewOrdered(os.Interrupt, system))
err = <-ifrit.Invoke(runner).Wait()
if err != nil {
log.Fatalf("failed-to-start: %s", err)
}
}
func loadCerts(certificatePath, privateKeyPath, privateKeyPassphrase, caCertificatePath string) (tls.Certificate, *x509.CertPool) {
certificate, err := config.LoadCertificateFromFiles(
certificatePath,
privateKeyPath,
privateKeyPassphrase,
)
if err != nil {
log.Fatalln(err)
}
caCertPool, err := config.LoadCertificatePoolFromFiles(caCertificatePath)
if err != nil {
log.Fatalln(err)
}
return certificate, caCertPool
}
func | (addr string, timeout time.Duration) (net.Conn, error) {
d := net.Dialer{
Timeout: timeout,
KeepAlive: 60 * time.Second,
}
return d.Dial("tcp", addr)
}
| keepAliveDial | identifier_name |
main.go | package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"time"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/trace"
"golang.org/x/oauth2"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/google/go-github/github"
flags "github.com/jessevdk/go-flags"
"github.com/pivotal-cf/paraphernalia/operate/admin"
"github.com/pivotal-cf/paraphernalia/secure/tlsconfig"
"github.com/pivotal-cf/paraphernalia/serve/grpcrunner"
"github.com/robdimsdale/honeylager"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/grouper"
"github.com/tedsuo/ifrit/sigmon"
"cred-alert/config"
"cred-alert/crypto"
"cred-alert/db"
"cred-alert/db/migrations"
"cred-alert/gitclient"
"cred-alert/metrics"
"cred-alert/notifications"
"cred-alert/queue"
"cred-alert/revok"
"cred-alert/revok/api"
"cred-alert/revok/stats"
"cred-alert/revokpb"
"cred-alert/search"
"cred-alert/sniff"
"rolodex/rolodexpb"
)
var info = admin.ServiceInfo{
Name: "revok",
Description: "A service which fetches new Git commits and scans them for credentials.",
Team: "PCF Security Enablement",
}
func main() {
var cfg *config.WorkerConfig
var flagOpts config.WorkerOpts
var ghClient *revok.GitHubClient
logger := lager.NewLogger("revok-worker")
logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
logger.Info("starting")
_, err := flags.Parse(&flagOpts)
if err != nil |
bs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))
if err != nil {
logger.Error("failed-to-open-config-file", err)
os.Exit(1)
}
cfg, err = config.LoadWorkerConfig(bs)
if err != nil {
logger.Error("failed-to-load-config-file", err)
os.Exit(1)
}
errs := cfg.Validate()
if errs != nil {
for _, err := range errs {
fmt.Println(err.Error())
}
os.Exit(1)
}
if cfg.Metrics.SentryDSN != "" {
logger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))
}
if cfg.Metrics.HoneycombWriteKey != "" && cfg.Metrics.Environment != "" {
s := honeylager.NewSink(cfg.Metrics.HoneycombWriteKey, cfg.Metrics.Environment, lager.DEBUG)
defer s.Close()
logger.RegisterSink(s)
}
workdir := cfg.WorkDir
_, err = os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor := queue.NewPushEventProcessor(
changeFetcher,
emitter,
clk,
traceClient,
)
signatureChecker := queue.NewSignatureCheck(crypto.NewRSAVerifier(publicKey), emitter, pushEventProcessor)
members = append(members, grouper.Member{
Name: "github-hint-handler",
Runner: queue.NewPubSubSubscriber(logger, subscription, signatureChecker, emitter),
})
if cfg.GitHub.AccessToken != "" {
repoDiscoverer := revok.NewRepoDiscoverer(
logger,
workdir,
cloneMsgCh,
ghClient,
clk,
cfg.RepositoryDiscovery.Interval,
cfg.RepositoryDiscovery.Organizations,
cfg.RepositoryDiscovery.Users,
repositoryRepository,
)
members = append(members, grouper.Member{
Name: "repo-discoverer",
Runner: repoDiscoverer,
})
}
startupTasks := []grouper.Member{
{
Name: "schedule-fetches",
Runner: changeScheduler,
},
}
system := []grouper.Member{
{
Name: "servers",
Runner: grouper.NewParallel(os.Interrupt, members),
},
{
Name: "startup-tasks",
Runner: grouper.NewParallel(os.Interrupt, startupTasks),
},
}
runner := sigmon.New(grouper.NewOrdered(os.Interrupt, system))
err = <-ifrit.Invoke(runner).Wait()
if err != nil {
log.Fatalf("failed-to-start: %s", err)
}
}
func loadCerts(certificatePath, privateKeyPath, privateKeyPassphrase, caCertificatePath string) (tls.Certificate, *x509.CertPool) {
certificate, err := config.LoadCertificateFromFiles(
certificatePath,
privateKeyPath,
privateKeyPassphrase,
)
if err != nil {
log.Fatalln(err)
}
caCertPool, err := config.LoadCertificatePoolFromFiles(caCertificatePath)
if err != nil {
log.Fatalln(err)
}
return certificate, caCertPool
}
func keepAliveDial(addr string, timeout time.Duration) (net.Conn, error) {
d := net.Dialer{
Timeout: timeout,
KeepAlive: 60 * time.Second,
}
return d.Dial("tcp", addr)
}
| {
os.Exit(1)
} | conditional_block |
main.go | package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"time"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/trace"
"golang.org/x/oauth2"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/google/go-github/github"
flags "github.com/jessevdk/go-flags"
"github.com/pivotal-cf/paraphernalia/operate/admin"
"github.com/pivotal-cf/paraphernalia/secure/tlsconfig"
"github.com/pivotal-cf/paraphernalia/serve/grpcrunner"
"github.com/robdimsdale/honeylager"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/grouper"
"github.com/tedsuo/ifrit/sigmon"
"cred-alert/config"
"cred-alert/crypto"
"cred-alert/db"
"cred-alert/db/migrations"
"cred-alert/gitclient"
"cred-alert/metrics"
"cred-alert/notifications"
"cred-alert/queue"
"cred-alert/revok"
"cred-alert/revok/api"
"cred-alert/revok/stats"
"cred-alert/revokpb"
"cred-alert/search"
"cred-alert/sniff"
"rolodex/rolodexpb"
)
var info = admin.ServiceInfo{
Name: "revok",
Description: "A service which fetches new Git commits and scans them for credentials.",
Team: "PCF Security Enablement",
}
func main() |
func loadCerts(certificatePath, privateKeyPath, privateKeyPassphrase, caCertificatePath string) (tls.Certificate, *x509.CertPool) {
certificate, err := config.LoadCertificateFromFiles(
certificatePath,
privateKeyPath,
privateKeyPassphrase,
)
if err != nil {
log.Fatalln(err)
}
caCertPool, err := config.LoadCertificatePoolFromFiles(caCertificatePath)
if err != nil {
log.Fatalln(err)
}
return certificate, caCertPool
}
func keepAliveDial(addr string, timeout time.Duration) (net.Conn, error) {
d := net.Dialer{
Timeout: timeout,
KeepAlive: 60 * time.Second,
}
return d.Dial("tcp", addr)
}
| {
var cfg *config.WorkerConfig
var flagOpts config.WorkerOpts
var ghClient *revok.GitHubClient
logger := lager.NewLogger("revok-worker")
logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
logger.Info("starting")
_, err := flags.Parse(&flagOpts)
if err != nil {
os.Exit(1)
}
bs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))
if err != nil {
logger.Error("failed-to-open-config-file", err)
os.Exit(1)
}
cfg, err = config.LoadWorkerConfig(bs)
if err != nil {
logger.Error("failed-to-load-config-file", err)
os.Exit(1)
}
errs := cfg.Validate()
if errs != nil {
for _, err := range errs {
fmt.Println(err.Error())
}
os.Exit(1)
}
if cfg.Metrics.SentryDSN != "" {
logger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))
}
if cfg.Metrics.HoneycombWriteKey != "" && cfg.Metrics.Environment != "" {
s := honeylager.NewSink(cfg.Metrics.HoneycombWriteKey, cfg.Metrics.Environment, lager.DEBUG)
defer s.Close()
logger.RegisterSink(s)
}
workdir := cfg.WorkDir
_, err = os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor := queue.NewPushEventProcessor(
changeFetcher,
emitter,
clk,
traceClient,
)
signatureChecker := queue.NewSignatureCheck(crypto.NewRSAVerifier(publicKey), emitter, pushEventProcessor)
members = append(members, grouper.Member{
Name: "github-hint-handler",
Runner: queue.NewPubSubSubscriber(logger, subscription, signatureChecker, emitter),
})
if cfg.GitHub.AccessToken != "" {
repoDiscoverer := revok.NewRepoDiscoverer(
logger,
workdir,
cloneMsgCh,
ghClient,
clk,
cfg.RepositoryDiscovery.Interval,
cfg.RepositoryDiscovery.Organizations,
cfg.RepositoryDiscovery.Users,
repositoryRepository,
)
members = append(members, grouper.Member{
Name: "repo-discoverer",
Runner: repoDiscoverer,
})
}
startupTasks := []grouper.Member{
{
Name: "schedule-fetches",
Runner: changeScheduler,
},
}
system := []grouper.Member{
{
Name: "servers",
Runner: grouper.NewParallel(os.Interrupt, members),
},
{
Name: "startup-tasks",
Runner: grouper.NewParallel(os.Interrupt, startupTasks),
},
}
runner := sigmon.New(grouper.NewOrdered(os.Interrupt, system))
err = <-ifrit.Invoke(runner).Wait()
if err != nil {
log.Fatalf("failed-to-start: %s", err)
}
} | identifier_body |
main.go | package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"time"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/trace"
"golang.org/x/oauth2"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/google/go-github/github"
flags "github.com/jessevdk/go-flags"
"github.com/pivotal-cf/paraphernalia/operate/admin"
"github.com/pivotal-cf/paraphernalia/secure/tlsconfig"
"github.com/pivotal-cf/paraphernalia/serve/grpcrunner"
"github.com/robdimsdale/honeylager"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/grouper"
"github.com/tedsuo/ifrit/sigmon"
"cred-alert/config"
"cred-alert/crypto"
"cred-alert/db"
"cred-alert/db/migrations"
"cred-alert/gitclient"
"cred-alert/metrics"
"cred-alert/notifications"
"cred-alert/queue"
"cred-alert/revok"
"cred-alert/revok/api"
"cred-alert/revok/stats"
"cred-alert/revokpb"
"cred-alert/search"
"cred-alert/sniff"
"rolodex/rolodexpb"
)
var info = admin.ServiceInfo{
Name: "revok",
Description: "A service which fetches new Git commits and scans them for credentials.",
Team: "PCF Security Enablement",
}
func main() {
var cfg *config.WorkerConfig
var flagOpts config.WorkerOpts
var ghClient *revok.GitHubClient
logger := lager.NewLogger("revok-worker")
logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
logger.Info("starting")
_, err := flags.Parse(&flagOpts)
if err != nil {
os.Exit(1)
}
bs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))
if err != nil {
logger.Error("failed-to-open-config-file", err)
os.Exit(1)
}
cfg, err = config.LoadWorkerConfig(bs)
if err != nil {
logger.Error("failed-to-load-config-file", err)
os.Exit(1)
}
errs := cfg.Validate()
if errs != nil {
for _, err := range errs {
fmt.Println(err.Error())
}
os.Exit(1)
}
if cfg.Metrics.SentryDSN != "" {
logger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))
}
if cfg.Metrics.HoneycombWriteKey != "" && cfg.Metrics.Environment != "" {
s := honeylager.NewSink(cfg.Metrics.HoneycombWriteKey, cfg.Metrics.Environment, lager.DEBUG)
defer s.Close()
logger.RegisterSink(s)
}
workdir := cfg.WorkDir
_, err = os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor := queue.NewPushEventProcessor(
changeFetcher,
emitter,
clk,
traceClient,
) |
signatureChecker := queue.NewSignatureCheck(crypto.NewRSAVerifier(publicKey), emitter, pushEventProcessor)
members = append(members, grouper.Member{
Name: "github-hint-handler",
Runner: queue.NewPubSubSubscriber(logger, subscription, signatureChecker, emitter),
})
if cfg.GitHub.AccessToken != "" {
repoDiscoverer := revok.NewRepoDiscoverer(
logger,
workdir,
cloneMsgCh,
ghClient,
clk,
cfg.RepositoryDiscovery.Interval,
cfg.RepositoryDiscovery.Organizations,
cfg.RepositoryDiscovery.Users,
repositoryRepository,
)
members = append(members, grouper.Member{
Name: "repo-discoverer",
Runner: repoDiscoverer,
})
}
startupTasks := []grouper.Member{
{
Name: "schedule-fetches",
Runner: changeScheduler,
},
}
system := []grouper.Member{
{
Name: "servers",
Runner: grouper.NewParallel(os.Interrupt, members),
},
{
Name: "startup-tasks",
Runner: grouper.NewParallel(os.Interrupt, startupTasks),
},
}
runner := sigmon.New(grouper.NewOrdered(os.Interrupt, system))
err = <-ifrit.Invoke(runner).Wait()
if err != nil {
log.Fatalf("failed-to-start: %s", err)
}
}
func loadCerts(certificatePath, privateKeyPath, privateKeyPassphrase, caCertificatePath string) (tls.Certificate, *x509.CertPool) {
certificate, err := config.LoadCertificateFromFiles(
certificatePath,
privateKeyPath,
privateKeyPassphrase,
)
if err != nil {
log.Fatalln(err)
}
caCertPool, err := config.LoadCertificatePoolFromFiles(caCertificatePath)
if err != nil {
log.Fatalln(err)
}
return certificate, caCertPool
}
func keepAliveDial(addr string, timeout time.Duration) (net.Conn, error) {
d := net.Dialer{
Timeout: timeout,
KeepAlive: 60 * time.Second,
}
return d.Dial("tcp", addr)
} | random_line_split | |
task.rs | use notifier::Notifier;
use sender::Sender;
use futures::{self, future, Future, Async};
use futures::executor::{self, Spawn};
use std::{fmt, mem, panic, ptr};
use std::cell::Cell;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release, Relaxed};
#[cfg(feature = "unstable-futures")]
use futures2;
pub(crate) struct Task {
ptr: *mut Inner,
}
#[derive(Debug)]
pub(crate) struct Queue {
head: AtomicPtr<Inner>,
tail: Cell<*mut Inner>,
stub: Box<Inner>,
}
#[derive(Debug)]
pub(crate) enum Poll {
Empty,
Inconsistent,
Data(Task),
}
#[derive(Debug)]
pub(crate) enum Run {
Idle,
Schedule,
Complete,
}
type BoxFuture = Box<Future<Item = (), Error = ()> + Send + 'static>;
#[cfg(feature = "unstable-futures")]
type BoxFuture2 = Box<futures2::Future<Item = (), Error = futures2::Never> + Send>;
enum TaskFuture {
Futures1(Spawn<BoxFuture>),
#[cfg(feature = "unstable-futures")]
Futures2 {
tls: futures2::task::LocalMap,
waker: futures2::task::Waker,
fut: BoxFuture2,
}
}
struct Inner {
// Next pointer in the queue that submits tasks to a worker.
next: AtomicPtr<Inner>,
// Task state
state: AtomicUsize,
// Number of outstanding references to the task
ref_count: AtomicUsize,
// Store the future at the head of the struct
//
// The future is dropped immediately when it transitions to Complete
future: Option<TaskFuture>,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum State {
/// Task is currently idle
Idle,
/// Task is currently running
Running,
/// Task is currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmute a u64 to a Task
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmute a u64 to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
// Attempt to transition from Running -> Idle, if successful,
// then the task does not need to be scheduled again. If the CAS
// fails, then the task has been unparked concurrent to running,
// in which case it transitions immediately back to scheduled
// and we return `true`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn | (&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {}
// ===== impl Inner =====
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture::Futures1(executor::spawn(Box::new(future::empty())))),
}
}
fn drop_future(&mut self) {
let _ = self.future.take();
}
}
impl Drop for Inner {
fn drop(&mut self) {
self.drop_future();
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Inner")
.field("next", &self.next)
.field("state", &self.state)
.field("ref_count", &self.ref_count)
.field("future", &"Spawn<BoxFuture>")
.finish()
}
}
// ===== impl Queue =====
impl Queue {
pub fn new() -> Queue {
let stub = Box::new(Inner::stub());
let ptr = &*stub as *const _ as *mut _;
Queue {
head: AtomicPtr::new(ptr),
tail: Cell::new(ptr),
stub: stub,
}
}
pub fn push(&self, handle: Task) {
unsafe {
self.push2(handle.ptr);
// Forgetting the handle is necessary to avoid the ref dec
mem::forget(handle);
}
}
unsafe fn push2(&self, handle: *mut Inner) {
// Set the next pointer. This does not require an atomic operation as
// this node is not accessible. The write will be flushed with the next
// operation
(*handle).next = AtomicPtr::new(ptr::null_mut());
// Update the head to point to the new node. We need to see the previous
// node in order to update the next pointer as well as release `handle`
// to any other threads calling `push`.
let prev = self.head.swap(handle, AcqRel);
// Release `handle` to the consume end.
(*prev).next.store(handle, Release);
}
pub unsafe fn poll(&self) -> Poll {
let mut tail = self.tail.get();
let mut next = (*tail).next.load(Acquire);
let stub = &*self.stub as *const _ as *mut _;
if tail == stub {
if next.is_null() {
return Poll::Empty;
}
self.tail.set(next);
tail = next;
next = (*next).next.load(Acquire);
}
if !next.is_null() {
self.tail.set(next);
// No ref_count inc is necessary here as this poll is paired
// with a `push` which "forgets" the handle.
return Poll::Data(Task {
ptr: tail,
});
}
if self.head.load(Acquire) != tail {
return Poll::Inconsistent;
}
self.push2(stub);
next = (*tail).next.load(Acquire);
if !next.is_null() {
self.tail.set(next);
return Poll::Data(Task {
ptr: tail,
});
}
Poll::Inconsistent
}
}
// ===== impl State =====
impl State {
/// Returns the initial task state.
///
/// Tasks start in the scheduled state as they are immediately scheduled on
/// creation.
fn new() -> State {
State::Scheduled
}
fn stub() -> State {
State::Idle
}
}
impl From<usize> for State {
fn from(src: usize) -> Self {
use self::State::*;
match src {
0 => Idle,
1 => Running,
2 => Notified,
3 => Scheduled,
4 => Complete,
_ => unreachable!(),
}
}
}
impl From<State> for usize {
fn from(src: State) -> Self {
use self::State::*;
match src {
Idle => 0,
Running => 1,
Notified => 2,
Scheduled => 3,
Complete => 4,
}
}
}
// ===== impl TaskFuture =====
impl TaskFuture {
#[allow(unused_variables)]
fn poll(&mut self, unpark: &Arc<Notifier>, id: usize, exec: &mut Sender) -> futures::Poll<(), ()> {
match *self {
TaskFuture::Futures1(ref mut fut) => fut.poll_future_notify(unpark, id),
#[cfg(feature = "unstable-futures")]
TaskFuture::Futures2 { ref mut fut, ref waker, ref mut tls } => {
let mut cx = futures2::task::Context::new(tls, waker, exec);
match fut.poll(&mut cx).unwrap() {
futures2::Async::Pending => Ok(Async::NotReady),
futures2::Async::Ready(x) => Ok(Async::Ready(x)),
}
}
}
}
}
| fmt | identifier_name |
task.rs | use notifier::Notifier;
use sender::Sender;
use futures::{self, future, Future, Async};
use futures::executor::{self, Spawn};
use std::{fmt, mem, panic, ptr};
use std::cell::Cell;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release, Relaxed};
#[cfg(feature = "unstable-futures")]
use futures2;
pub(crate) struct Task {
ptr: *mut Inner,
}
#[derive(Debug)]
pub(crate) struct Queue {
head: AtomicPtr<Inner>,
tail: Cell<*mut Inner>,
stub: Box<Inner>,
}
#[derive(Debug)]
pub(crate) enum Poll {
Empty,
Inconsistent,
Data(Task),
}
#[derive(Debug)]
pub(crate) enum Run {
Idle,
Schedule,
Complete,
}
type BoxFuture = Box<Future<Item = (), Error = ()> + Send + 'static>;
#[cfg(feature = "unstable-futures")]
type BoxFuture2 = Box<futures2::Future<Item = (), Error = futures2::Never> + Send>;
enum TaskFuture {
Futures1(Spawn<BoxFuture>),
#[cfg(feature = "unstable-futures")]
Futures2 {
tls: futures2::task::LocalMap,
waker: futures2::task::Waker,
fut: BoxFuture2,
}
}
struct Inner {
// Next pointer in the queue that submits tasks to a worker.
next: AtomicPtr<Inner>,
// Task state
state: AtomicUsize,
// Number of outstanding references to the task
ref_count: AtomicUsize,
// Store the future at the head of the struct
//
// The future is dropped immediately when it transitions to Complete
future: Option<TaskFuture>,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum State {
/// Task is currently idle
Idle,
/// Task is currently running
Running,
/// Task is currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmute a u64 to a Task
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmute a u64 to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
// Attempt to transition from Running -> Idle, if successful,
// then the task does not need to be scheduled again. If the CAS
// fails, then the task has been unparked concurrent to running,
// in which case it transitions immediately back to scheduled
// and we return `true`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {}
// ===== impl Inner =====
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture::Futures1(executor::spawn(Box::new(future::empty())))),
}
}
fn drop_future(&mut self) {
let _ = self.future.take();
}
}
impl Drop for Inner {
fn drop(&mut self) {
self.drop_future();
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Inner")
.field("next", &self.next)
.field("state", &self.state)
.field("ref_count", &self.ref_count)
.field("future", &"Spawn<BoxFuture>")
.finish()
}
}
// ===== impl Queue =====
impl Queue {
pub fn new() -> Queue {
let stub = Box::new(Inner::stub());
let ptr = &*stub as *const _ as *mut _;
Queue {
head: AtomicPtr::new(ptr),
tail: Cell::new(ptr),
stub: stub,
}
}
pub fn push(&self, handle: Task) {
unsafe {
self.push2(handle.ptr);
// Forgetting the handle is necessary to avoid the ref dec
mem::forget(handle);
}
}
unsafe fn push2(&self, handle: *mut Inner) {
// Set the next pointer. This does not require an atomic operation as
// this node is not accessible. The write will be flushed with the next
// operation
(*handle).next = AtomicPtr::new(ptr::null_mut());
// Update the head to point to the new node. We need to see the previous
// node in order to update the next pointer as well as release `handle`
// to any other threads calling `push`.
let prev = self.head.swap(handle, AcqRel);
// Release `handle` to the consume end.
(*prev).next.store(handle, Release);
}
pub unsafe fn poll(&self) -> Poll {
let mut tail = self.tail.get();
let mut next = (*tail).next.load(Acquire);
let stub = &*self.stub as *const _ as *mut _;
if tail == stub {
if next.is_null() {
return Poll::Empty;
}
self.tail.set(next);
tail = next;
next = (*next).next.load(Acquire);
}
if !next.is_null() {
self.tail.set(next);
// No ref_count inc is necessary here as this poll is paired
// with a `push` which "forgets" the handle.
return Poll::Data(Task {
ptr: tail,
});
}
if self.head.load(Acquire) != tail {
return Poll::Inconsistent;
}
self.push2(stub);
next = (*tail).next.load(Acquire);
if !next.is_null() {
self.tail.set(next);
return Poll::Data(Task {
ptr: tail,
});
}
Poll::Inconsistent
}
}
// ===== impl State =====
impl State {
/// Returns the initial task state.
///
/// Tasks start in the scheduled state as they are immediately scheduled on
/// creation.
fn new() -> State {
State::Scheduled
}
fn stub() -> State |
}
impl From<usize> for State {
fn from(src: usize) -> Self {
use self::State::*;
match src {
0 => Idle,
1 => Running,
2 => Notified,
3 => Scheduled,
4 => Complete,
_ => unreachable!(),
}
}
}
impl From<State> for usize {
fn from(src: State) -> Self {
use self::State::*;
match src {
Idle => 0,
Running => 1,
Notified => 2,
Scheduled => 3,
Complete => 4,
}
}
}
// ===== impl TaskFuture =====
impl TaskFuture {
#[allow(unused_variables)]
fn poll(&mut self, unpark: &Arc<Notifier>, id: usize, exec: &mut Sender) -> futures::Poll<(), ()> {
match *self {
TaskFuture::Futures1(ref mut fut) => fut.poll_future_notify(unpark, id),
#[cfg(feature = "unstable-futures")]
TaskFuture::Futures2 { ref mut fut, ref waker, ref mut tls } => {
let mut cx = futures2::task::Context::new(tls, waker, exec);
match fut.poll(&mut cx).unwrap() {
futures2::Async::Pending => Ok(Async::NotReady),
futures2::Async::Ready(x) => Ok(Async::Ready(x)),
}
}
}
}
}
| {
State::Idle
} | identifier_body |
task.rs | use notifier::Notifier;
use sender::Sender;
use futures::{self, future, Future, Async};
use futures::executor::{self, Spawn};
use std::{fmt, mem, panic, ptr};
use std::cell::Cell;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release, Relaxed};
#[cfg(feature = "unstable-futures")]
use futures2;
pub(crate) struct Task {
ptr: *mut Inner,
}
#[derive(Debug)]
pub(crate) struct Queue {
head: AtomicPtr<Inner>,
tail: Cell<*mut Inner>,
stub: Box<Inner>,
}
#[derive(Debug)]
pub(crate) enum Poll {
Empty,
Inconsistent,
Data(Task),
}
#[derive(Debug)]
pub(crate) enum Run {
Idle,
Schedule,
Complete,
}
type BoxFuture = Box<Future<Item = (), Error = ()> + Send + 'static>;
#[cfg(feature = "unstable-futures")]
type BoxFuture2 = Box<futures2::Future<Item = (), Error = futures2::Never> + Send>;
enum TaskFuture {
Futures1(Spawn<BoxFuture>),
#[cfg(feature = "unstable-futures")]
Futures2 {
tls: futures2::task::LocalMap,
waker: futures2::task::Waker,
fut: BoxFuture2,
}
}
struct Inner {
// Next pointer in the queue that submits tasks to a worker.
next: AtomicPtr<Inner>,
// Task state
state: AtomicUsize,
// Number of outstanding references to the task
ref_count: AtomicUsize,
// Store the future at the head of the struct
//
// The future is dropped immediately when it transitions to Complete
future: Option<TaskFuture>,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum State {
/// Task is currently idle
Idle,
/// Task is currently running
Running,
/// Task is currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmute a u64 to a Task
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmute a u64 to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
// Attempt to transition from Running -> Idle, if successful,
// then the task does not need to be scheduled again. If the CAS
// fails, then the task has been unparked concurrent to running,
// in which case it transitions immediately back to scheduled
// and we return `true`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {} |
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture::Futures1(executor::spawn(Box::new(future::empty())))),
}
}
fn drop_future(&mut self) {
let _ = self.future.take();
}
}
impl Drop for Inner {
fn drop(&mut self) {
self.drop_future();
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Inner")
.field("next", &self.next)
.field("state", &self.state)
.field("ref_count", &self.ref_count)
.field("future", &"Spawn<BoxFuture>")
.finish()
}
}
// ===== impl Queue =====
impl Queue {
pub fn new() -> Queue {
let stub = Box::new(Inner::stub());
let ptr = &*stub as *const _ as *mut _;
Queue {
head: AtomicPtr::new(ptr),
tail: Cell::new(ptr),
stub: stub,
}
}
pub fn push(&self, handle: Task) {
unsafe {
self.push2(handle.ptr);
// Forgetting the handle is necessary to avoid the ref dec
mem::forget(handle);
}
}
unsafe fn push2(&self, handle: *mut Inner) {
// Set the next pointer. This does not require an atomic operation as
// this node is not accessible. The write will be flushed with the next
// operation
(*handle).next = AtomicPtr::new(ptr::null_mut());
// Update the head to point to the new node. We need to see the previous
// node in order to update the next pointer as well as release `handle`
// to any other threads calling `push`.
let prev = self.head.swap(handle, AcqRel);
// Release `handle` to the consume end.
(*prev).next.store(handle, Release);
}
pub unsafe fn poll(&self) -> Poll {
let mut tail = self.tail.get();
let mut next = (*tail).next.load(Acquire);
let stub = &*self.stub as *const _ as *mut _;
if tail == stub {
if next.is_null() {
return Poll::Empty;
}
self.tail.set(next);
tail = next;
next = (*next).next.load(Acquire);
}
if !next.is_null() {
self.tail.set(next);
// No ref_count inc is necessary here as this poll is paired
// with a `push` which "forgets" the handle.
return Poll::Data(Task {
ptr: tail,
});
}
if self.head.load(Acquire) != tail {
return Poll::Inconsistent;
}
self.push2(stub);
next = (*tail).next.load(Acquire);
if !next.is_null() {
self.tail.set(next);
return Poll::Data(Task {
ptr: tail,
});
}
Poll::Inconsistent
}
}
// ===== impl State =====
impl State {
/// Returns the initial task state.
///
/// Tasks start in the scheduled state as they are immediately scheduled on
/// creation.
fn new() -> State {
State::Scheduled
}
fn stub() -> State {
State::Idle
}
}
impl From<usize> for State {
fn from(src: usize) -> Self {
use self::State::*;
match src {
0 => Idle,
1 => Running,
2 => Notified,
3 => Scheduled,
4 => Complete,
_ => unreachable!(),
}
}
}
impl From<State> for usize {
fn from(src: State) -> Self {
use self::State::*;
match src {
Idle => 0,
Running => 1,
Notified => 2,
Scheduled => 3,
Complete => 4,
}
}
}
// ===== impl TaskFuture =====
impl TaskFuture {
#[allow(unused_variables)]
fn poll(&mut self, unpark: &Arc<Notifier>, id: usize, exec: &mut Sender) -> futures::Poll<(), ()> {
match *self {
TaskFuture::Futures1(ref mut fut) => fut.poll_future_notify(unpark, id),
#[cfg(feature = "unstable-futures")]
TaskFuture::Futures2 { ref mut fut, ref waker, ref mut tls } => {
let mut cx = futures2::task::Context::new(tls, waker, exec);
match fut.poll(&mut cx).unwrap() {
futures2::Async::Pending => Ok(Async::NotReady),
futures2::Async::Ready(x) => Ok(Async::Ready(x)),
}
}
}
}
} |
// ===== impl Inner ===== | random_line_split |
process_test.py | import json
import netCDF4
import numpy as np
import logging
import os
import argparse
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from glob import glob
# from pyclamps.utils import list_to_masked_array, ray_height
# from pyclamps.vad import calc_vad_3d, calc_homogeneity
# Global Values
FILL_VALUE = -9999.
VEL_LIM = (-30, 30)
HGT_LIM = (0, 1000)
PROFILES_PER_PLOT = 2
Re = 6371000
R43 = Re * 4.0 / 3.0
# python logging NOT deployment logging
# logging.basicConfig(filename='DLprocessing.log', level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(message)s')
logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')
# some needed fucntions
def wind_uv_to_dir(U, V):
"""
Calculates the wind direction from the u and v component of wind.
Takes into account the wind direction coordinates is different than the
trig unit circle coordinate. If the wind directin is 360 then returns zero
(by %360)
Inputs:
U = west/east direction (wind from the west is positive, from the east is negative)
V = south/noth direction (wind from the south is positive, from the north is negative)
"""
WDIR = (270 - np.rad2deg(np.arctan2(V, U))) % 360
return WDIR
def wind_uv_to_spd(U, V):
"""
Calculates the wind speed from the u and v wind components
Inputs:
U = west/east direction (wind from the west is positive, from the east is negative)
V = south/noth direction (wind from the south is positive, from the north is negative)
"""
WSPD = np.sqrt(np.square(U) + np.square(V))
return WSPD
def list_to_masked_array(in_list, mask_value):
a = np.array(in_list)
return np.ma.masked_where(a == mask_value, a)
def ray_height(rng, elev, H0=0, R1=R43):
"""
Center of radar beam height calculation.
Rinehart (1997), Eqn 3.12, Bech et al. (2003) Eqn 3
INPUT::
-----
r : float
Range from radar to point of interest [m]
elev : float
Elevation angle of radar beam [deg]
H0 : float
Height of radar antenna [m]
R1 : float
Effective radius
OUTPUT::
-----
H : float
Radar beam height [m]
USAGE::
-----
H = ray_height(r,elev,H0,[R1=6374000.*4/3])
NOTES::
-----
If no Effective radius is given a "standard atmosphere" is assumed,
the 4/3 approximation.
Bech et al. (2003) use a factor ke that is the ratio of earth's radius
to the effective radius (see r_effective function) and Eqn 4 in B03
"""
# Convert earth's radius to km for common dN/dH values and then
# multiply by 1000 to return radius in meters
hgt = np.sqrt(rng ** 2 + R1 ** 2 + 2 * rng * R1 * np.sin(np.deg2rad(elev)))
hgt = hgt - R1 + H0
return hgt
def rotate(u, v, w, yaw, pitch, roll):
'''
Calculate the value of u, v, and w after a specified axis rotation
:param u: U component of the wind
:param v: V component of the wind
:param w: W component of the wind
:param yaw: Rotation about the Z axis
:param pitch: Rotation about the X axis
:param roll: Rotation about the Y axis
:return:
result: 3D array of the new U, V, and W fields after the rotation
'''
rot_matrix = np.asarray(
[[np.cos(yaw) * np.cos(pitch), np.cos(yaw) * np.sin(pitch) * np.sin(roll) - np.sin(yaw) * np.cos(roll),
np.cos(yaw) * np.sin(pitch) * np.cos(roll) + np.sin(yaw) * np.sin(roll)],
[np.sin(yaw) * np.cos(pitch), np.sin(yaw) * np.sin(pitch) * np.sin(roll) + np.cos(yaw) * np.cos(roll),
np.sin(yaw) * np.sin(pitch) * np.cos(roll) - np.cos(yaw) * np.sin(roll)],
[-np.sin(pitch), np.cos(pitch) * np.sin(roll), np.cos(pitch) * np.cos(roll)]])
vel_matrix = np.asarray([[u], [v], [w]]).transpose()
result = np.dot(vel_matrix, rot_matrix)
return result
def calc_homogeneity(raw_vr, derived_vr):
"""
Determines homogeneity of the wind field as described in E. Paschke et. al. 2015 section 2.2.4
:param raw_vr: Raw radial velocity
:param derived_vr: Radial velocity derived from wind retrieval
:return:
"""
vr_bar = np.sum(raw_vr)
return 1 - np.sum((raw_vr - derived_vr) ** 2) / np.sum((raw_vr - vr_bar) ** 2)
def calc_vad_3d(az, elev, vel):
"""
Calculates the 3D VAD
:param az: Azimuth data
:param elev: Elevation Data
:param vel: Velocity Data
:return:
u: U component of the wind
v: V component of the wind
w: W component of the wind
"""
elev = np.deg2rad(elev)
az = np.deg2rad(az)
if vel.size > 1: # If there could be sufficient data points...
A = sum(vel * np.sin(az))
B = sum(np.sin(az) ** 2 * np.cos(elev))
C = sum(np.cos(az) * np.sin(az) * np.cos(elev))
G = sum(np.sin(az) * np.sin(elev))
D = sum(vel * np.cos(az))
E = sum(np.sin(az) * np.cos(az) * np.cos(elev))
F = sum(np.cos(az) ** 2 * np.cos(elev))
H = sum(np.cos(az) * np.sin(elev))
W = sum(vel)
X = sum(np.sin(az) * np.cos(elev))
Y = sum(np.cos(az) * np.cos(elev))
Z = sum(az * np.sin(elev))
# solve A = uB + vC + wG , D = uE + vF + wH and W = uX + vY+ wZ
y = np.array([[B, E, X], [C, F, Y], [G, H, Z]])
z = np.array([A, D, W])
# print y
# print z
try:
sol = np.linalg.solve(y, z)
# print sol
u = sol[0]
v = sol[1]
w = sol[2]
return u, v, w
except np.linalg.linalg.LinAlgError:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
else:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
def decode_header(header):
"""
Takes in a list of lines from the raw hpl file. Separates them by
tab and removes unnecessary text
"""
new_header = {}
for item in header:
split = item.split('\t')
new_header[split[0].replace(':', '')] = split[1].replace("\r\n", "")
return new_header
def _to_epoch(dt):
return (dt - datetime(1970, 1, 1)).total_seconds()
"""
process_file(in_file, out_dir, prefix):
Processes a raw halo hpl file and turns it into a netcdf
:param in_file:
:param out_dir:
:return:
"""
def writeVAD_to_nc(filename, date, elev, u, v, w, ws, wd, hgt, rmse, r_sq,up_flag,intensity):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'a', format="NETCDF4")
dim = nc.dimensions['t'].size
u_var = nc.variables['u']
v_var = nc.variables['v']
w_var = nc.variables['w']
ws_var = nc.variables['ws']
wd_var = nc.variables['wd']
rms_var = nc.variables['rms']
r_sq_var = nc.variables['r_sq']
time_var = nc.variables['time']
hgt_var = nc.variables['hgt']
up_flag_var = nc.variables['up_flag']
u_var[dim, :] = u
v_var[dim, :] = v
w_var[dim, :] = w
ws_var[dim, :] = ws
wd_var[dim, :] = wd
rms_var[dim, :] = rmse
r_sq_var[dim, :] = r_sq
up_flag_var[dim] = up_flag
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("elev", elev)
nc.setncattr("date", date.isoformat())
# Create the variables
u_var = nc.createVariable('u', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
v_var = nc.createVariable('v', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
w_var = nc.createVariable('w', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
ws_var = nc.createVariable('ws', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
wd_var = nc.createVariable('wd', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rms_var = nc.createVariable('rms', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
r_sq_var = nc.createVariable('r_sq', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
dim = nc.dimensions['t'].size
u_var[dim, :] = np.where(np.isnan(u), FILL_VALUE, u)
v_var[dim, :] = np.where(np.isnan(v), FILL_VALUE, v)
w_var[dim, :] = np.where(np.isnan(w), FILL_VALUE, w)
ws_var[dim, :] = np.where(np.isnan(ws), FILL_VALUE, ws)
wd_var[dim, :] = np.where(np.isnan(wd), FILL_VALUE, wd)
hgt_var[dim, :] = np.where(np.isnan(hgt), FILL_VALUE, hgt)
rms_var[dim, :] = np.where(np.isnan(rmse), FILL_VALUE, rmse)
r_sq_var[dim, :] = np.where(np.isnan(r_sq), FILL_VALUE, r_sq)
time_var[dim] = (date - datetime(1970, 1, 1)).total_seconds()
up_flag_var[dim]=up_flag
intensity_var[dim] = intensity
# Close the netcdf
nc.close()
def writeSTARE_to_nc(filename, date, w, hgt, intensity):
|
def writeRHI_to_nc(filename, date, vel, rng, elev, az, intensity,up_flag):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'r+', format="NETCDF4")
dim = nc.dimensions['t'].size
vel_var = nc.variables['velocity']
rng_var = nc.variables['range']
elev_var = nc.variables['elevation']
az_var = nc.variables['azimuth']
intensity_var = nc.variables['intensity']
time_var = nc.variables['time']
up_flag_var = nc.variables['up_flag']
# vel_var[dim, :] = vel
# rng_var[:] = rng
# elev_var[dim] = elev
# az_var[dim] = az
# intensity_var[dim, :] = intensity
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(rng))
nc.createDimension('t', None)
# Create the variables
vel_var = nc.createVariable('velocity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rng_var = nc.createVariable('range', 'f8', ('height'), fill_value=FILL_VALUE)
elev_var = nc.createVariable('elevation', 'f8', ('t'), fill_value=FILL_VALUE)
az_var = nc.createVariable('azimuth', 'f8', ('t'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
dim = nc.dimensions['t'].size
dim2 = dim+len(date)
vel_var[dim:dim2, :] = vel
rng_var[:] = np.where(np.isnan(rng), FILL_VALUE, rng)
elev_var[dim:dim2] = np.where(np.isnan(elev), FILL_VALUE, elev)
az_var[dim:dim2] = np.where(np.isnan(az), FILL_VALUE, az)
intensity_var[dim:dim2, :] = np.where(np.isnan(intensity), FILL_VALUE, intensity)
time_var[dim:dim2] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
#print [up_flag for i in range(len(date))]
up_flag_var[dim:dim2] = [float(up_flag) for i in range(len(date))]
# Close the netcdf
nc.close()
#########PROCESS CODE####################
#######deployment logging
# reads config file in TORUS_DL/logs/config.js
# writesout changes/events to deployment log in TORUS_DL/logs/log_MMDDYY.txt
# grab current timestamp for run
now = datetime.utcnow()
#now=datetime(2019,05,17,23,58)
log_time = now.strftime("%m%d%y_%H%M")
today = now.strftime("%Y%m%d") # - commented out for test date below.
# today_l = datetime(2019,05,07)
# today=today_l.strftime("%Y%m%d")
# now = datetime(year=2019, month=5, day=7, hour=16, minute=15)
# open and read in config file info
config = open('/Users/elizabethsmith/TORUS_DL/logs/config.js')
logdata = json.load(config)
config.close()
if logdata["status"]=='up':
print "we're up :)"
up_flag=1
if logdata["status"]=='down':
print "we're down :( "
up_flag=0
# TB - Running into an error here when starting a new log file.
# Added some automation to make your like easier
if os.path.exists('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"]):
# open and read most recent logfile entry
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "r+")
lines = current_logfile.readlines()
prev_status = lines[-6][8:-1] # reading logfile previous status (skipping text)
prev_heading = lines[-5][9:-1] # reading logfile previous heading (skipping text)
prev_lat = lines[-4][5:-1] # reading logfile previous lat (skipping text)
prev_lon = lines[-3][5:-1] # reading logfile previous lat (skipping text)
prev_note = lines[-2][6:-1] # reading logfile previous note (skipping text)
# check if the previous log entry matches the data in the config file..
print logdata["note"], prev_note
if (str(logdata["status"]) != prev_status or str(logdata["heading"]) != prev_heading or
str(logdata["lat"]) != prev_lat or str(logdata["lon"]) != prev_lon or
str(logdata["note"]) != prev_note):
print '**CONFIG FILE HAS BEEN UPDATED!** generating log entry...'
# generate writeout for new log entry based on config file.
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
else:
print "--no config changes"
current_logfile.close()
else:
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "w")
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
# get list of exisiting processed scans
path_proc = '/Users/elizabethsmith/TORUS_DL/data/nonQA_proc/dl/2019/201905/' + today + '/'
# check to make sure the output dir exists
try:
os.makedirs(path_proc)
except OSError:
logging.debug("Output path already exists...")
# Check to make sure the processed_files.txt exists
if not os.path.exists(path_proc + 'processed_files.txt'):
os.system('touch {}'.format(path_proc + 'processed_files.txt'))
# Open the processed files list and read it in
proc_list = open(path_proc + 'processed_files.txt', "r+")
proc_files = proc_list.readlines()
proc_list.close()
# Be sure to add the files that are processed to the running list
# get list of existing raw scans - always do the stare
# TB - I changed some things here so only the scans from the current hour are even looked at.
# - This cuts down on processing for the stare files!
path_raw = now.strftime('/Users/elizabethsmith/TORUS_DL/data/raw/dl/%Y/%Y%m/%Y%m%d/*%Y%m%d_*.hpl')
#print path_raw
raw_files = [f for f in glob(path_raw)]
#print raw_files
raw_files=sorted(raw_files)
# Process the scans
for in_file in raw_files:
# TB - I changed your logic for finding the files to process. This is a little easier and less prone to bugs
# Check to see if the file is in the alreasy processed files. If it is, skip it.
if in_file+'\n' in proc_files:
logging.debug("{} already processed".format(in_file))
continue
else:
logging.info("Processing {}".format(in_file))
# read in new scan
out_dir = path_proc
prefix = 'nonQA'
# Read in the text file
lines = []
with open(in_file) as f:
for line in f:
lines.append(line)
logging.debug("Decoding header")
# Read in the header info
header = decode_header(lines[0:11])
ngates = int(header['Number of gates'])
# nrays = int(header['No. of rays in file']) # Cant do this apparently. Not always correct (wtf)
len_data = len(lines[17:])
nrays = len_data / (ngates + 1)
gate_length = float(header['Range gate length (m)'])
start_time = datetime.strptime(header['Start time'], '%Y%m%d %H:%M:%S.%f')
scan_type = None
logging.debug("Reading data")
# Read in the actual data
az = np.zeros(nrays)
hour = np.zeros(nrays)
elev = np.zeros(nrays)
pitch = np.zeros(nrays)
roll = np.zeros(nrays)
rng = np.asarray([(gate + .5) * gate_length for gate in range(ngates)])
vel = np.zeros((ngates, nrays))
intensity = np.zeros((ngates, nrays))
beta = np.zeros((ngates, nrays))
try:
for ray in range(nrays):
# Get the scan info
info = lines[ray * (ngates + 1) + 17].split()
hour[ray] = float(info[0])
az[ray] = float(info[1])
elev[ray] = float(info[2])
pitch[ray] = float(info[3])
roll[ray] = float(info[4])
for gate in range(ngates):
data = lines[ray * (ngates + 1) + 17 + gate + 1].split()
vel[gate, ray] = float(data[1])
intensity[gate, ray] = float(data[2])
beta[gate, ray] = float(data[3])
except IndexError:
logging.warning("Something went wrong with the indexing here...")
# correction for some rounding/hysteresis in scanner azimuths... setting all vals==360. to 0.
az[np.where(az == 360.)] = 0.
# dynamic identification of lidar scan type (fp,ppi,rhi)
# TB - I Added the round here. Was getting a fp file ID'd as a rhi file
# - Also had an issue with and RHI file where az[0] was 0.01 and az[2] was 0
try:
if np.round(az[0], 1) == np.round(az[2], 1): # const azimuth could be RHI or stare
if np.round(elev[0], 1) == np.round(elev[2], 1): # const azimuth and constant elev = stare
scan_type = 'fp'
else: # const azimuth and non-constant elev = RHI
scan_type = 'rhi'
elif np.round(elev[0], 1) == np.round(elev[2]): # changing azimuth, const elev = PPI
scan_type = 'ppi'
if scan_type == None:
raise IndexError
logging.info("Scan Type: " + scan_type)
except IndexError:
logging.warning("Something went wrong with scan type IDing...")
if scan_type == 'ppi':
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
hgt = []
u = []
v = []
w = []
rmse = []
r_sq = []
for i, rng in enumerate(rng):
# Get the required stuff for this range ring
cnr = intensity[i, :] # range,azimuth
Vel = vel[i, :] # range,azimuth
Az = az # 8-terms of az
Elev = elev # 8-terms of az
# Filter out the bad values based on CNR - default was 1.015
Az = np.where(cnr <= 1.01, FILL_VALUE, Az)
Vel = np.where(cnr <= 1.01, FILL_VALUE, Vel)
Az = list_to_masked_array(Az, FILL_VALUE)
Vel = list_to_masked_array(Vel, FILL_VALUE)
# Calculate the vad and height for this range ring
tmp_u, tmp_v, tmp_w = calc_vad_3d(Az, Elev, Vel) # grab this to it can point to it!!!
# Calculate the RMSE
N = float(Vel.size)
az_rad = np.deg2rad(Az)
elev_rad = np.deg2rad(Elev)
derived_vr = (np.sin(az_rad) * np.cos(elev_rad) * tmp_u) + \
(np.cos(az_rad) * np.cos(elev_rad) * tmp_v) + \
(np.sin(elev_rad) * tmp_w)
tmp_E = Vel - derived_vr
# Calculate rms error
tmp_RMSE = np.sqrt(1 / N * np.sum(tmp_E ** 2))
tmp_r_sq = calc_homogeneity(Vel, derived_vr)
# Append to the lists for plotting
u.append(tmp_u)
v.append(tmp_v)
w.append(tmp_w)
hgt.append(ray_height(rng, Elev[0]))
rmse.append(tmp_RMSE)
r_sq.append(tmp_r_sq)
vector_wind = rotate(u, v, w, logdata["heading"], 0, 0)
vector_wind = vector_wind.squeeze()
u = vector_wind[:, 0]
v = vector_wind[:, 1]
w = vector_wind[:, 2]
ws = wind_uv_to_spd(u, v)
wd = wind_uv_to_dir(u, v)
writeVAD_to_nc(path_proc + prefix + date.strftime('%Y%m%d') + '_VAD.nc', date, elev, u, v, w, ws, wd, hgt,
rmse, r_sq,up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
if scan_type == 'fp':
# TB - I decided that it is best to just process the entire stare file every time
# instead of try to append to the netcdf. This shouldn't hinder processeing time that much
# since I changed things to only grab things from the same hour you're processing
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
# Filter out the bad values based on CNR
Vel = np.where(intensity <= 1.01, FILL_VALUE, vel)
# Get the rng into a 2d array
rng = np.array([rng for i in range(len(rng))])
logging.debug("Writing stare file")
writeSTARE_to_nc(path_proc+prefix+date.strftime('%Y%m%d_%H_STARE.nc'), times, vel.transpose(), rng, up_flag)
if scan_type=='rhi':
# TB - A quick tip: Don't do an RHI at az=0. It bounces between 0 and 360 and is a pain in the ass to process
# - Just do it at like 1 deg. or even .1
# TB - Note to self - need to do a heading correction on this one.
date = start_time
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
filename = path_proc + prefix + date.strftime('%Y%m%d_%H') + '_RHI.nc'
# break
writeRHI_to_nc(filename, times, vel.transpose(), rng, elev, az, intensity.transpose(), up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
| logging.debug(filename)
logging.debug(date)
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('hgt', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("date", date[0].isoformat())
# Create the variables
w_var = nc.createVariable('w', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
hgt_var[:] = hgt
time_var[:] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
w_var[:, :] = w
intensity_var[:] = intensity
# Close the netcdf
nc.close() | identifier_body |
process_test.py | import json
import netCDF4
import numpy as np
import logging
import os
import argparse
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from glob import glob
# from pyclamps.utils import list_to_masked_array, ray_height
# from pyclamps.vad import calc_vad_3d, calc_homogeneity
# Global Values
FILL_VALUE = -9999.
VEL_LIM = (-30, 30)
HGT_LIM = (0, 1000)
PROFILES_PER_PLOT = 2
Re = 6371000
R43 = Re * 4.0 / 3.0
# python logging NOT deployment logging
# logging.basicConfig(filename='DLprocessing.log', level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(message)s')
logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')
# some needed fucntions
def wind_uv_to_dir(U, V):
"""
Calculates the wind direction from the u and v component of wind.
Takes into account the wind direction coordinates is different than the
trig unit circle coordinate. If the wind directin is 360 then returns zero
(by %360)
Inputs:
U = west/east direction (wind from the west is positive, from the east is negative)
V = south/noth direction (wind from the south is positive, from the north is negative)
"""
WDIR = (270 - np.rad2deg(np.arctan2(V, U))) % 360
return WDIR
def wind_uv_to_spd(U, V):
"""
Calculates the wind speed from the u and v wind components
Inputs:
U = west/east direction (wind from the west is positive, from the east is negative)
V = south/noth direction (wind from the south is positive, from the north is negative)
"""
WSPD = np.sqrt(np.square(U) + np.square(V))
return WSPD
def list_to_masked_array(in_list, mask_value):
a = np.array(in_list)
return np.ma.masked_where(a == mask_value, a)
def ray_height(rng, elev, H0=0, R1=R43):
"""
Center of radar beam height calculation.
Rinehart (1997), Eqn 3.12, Bech et al. (2003) Eqn 3
INPUT::
-----
r : float
Range from radar to point of interest [m]
elev : float
Elevation angle of radar beam [deg]
H0 : float
Height of radar antenna [m]
R1 : float
Effective radius
OUTPUT::
-----
H : float
Radar beam height [m]
USAGE::
-----
H = ray_height(r,elev,H0,[R1=6374000.*4/3])
NOTES::
-----
If no Effective radius is given a "standard atmosphere" is assumed,
the 4/3 approximation.
Bech et al. (2003) use a factor ke that is the ratio of earth's radius
to the effective radius (see r_effective function) and Eqn 4 in B03
"""
# Convert earth's radius to km for common dN/dH values and then
# multiply by 1000 to return radius in meters
hgt = np.sqrt(rng ** 2 + R1 ** 2 + 2 * rng * R1 * np.sin(np.deg2rad(elev)))
hgt = hgt - R1 + H0
return hgt
def rotate(u, v, w, yaw, pitch, roll):
'''
Calculate the value of u, v, and w after a specified axis rotation
:param u: U component of the wind
:param v: V component of the wind
:param w: W component of the wind
:param yaw: Rotation about the Z axis
:param pitch: Rotation about the X axis
:param roll: Rotation about the Y axis
:return:
result: 3D array of the new U, V, and W fields after the rotation
'''
rot_matrix = np.asarray(
[[np.cos(yaw) * np.cos(pitch), np.cos(yaw) * np.sin(pitch) * np.sin(roll) - np.sin(yaw) * np.cos(roll),
np.cos(yaw) * np.sin(pitch) * np.cos(roll) + np.sin(yaw) * np.sin(roll)],
[np.sin(yaw) * np.cos(pitch), np.sin(yaw) * np.sin(pitch) * np.sin(roll) + np.cos(yaw) * np.cos(roll),
np.sin(yaw) * np.sin(pitch) * np.cos(roll) - np.cos(yaw) * np.sin(roll)],
[-np.sin(pitch), np.cos(pitch) * np.sin(roll), np.cos(pitch) * np.cos(roll)]])
vel_matrix = np.asarray([[u], [v], [w]]).transpose()
result = np.dot(vel_matrix, rot_matrix)
return result
def calc_homogeneity(raw_vr, derived_vr):
"""
Determines homogeneity of the wind field as described in E. Paschke et. al. 2015 section 2.2.4
:param raw_vr: Raw radial velocity
:param derived_vr: Radial velocity derived from wind retrieval
:return:
"""
vr_bar = np.sum(raw_vr)
return 1 - np.sum((raw_vr - derived_vr) ** 2) / np.sum((raw_vr - vr_bar) ** 2)
def calc_vad_3d(az, elev, vel):
"""
Calculates the 3D VAD
:param az: Azimuth data
:param elev: Elevation Data
:param vel: Velocity Data
:return:
u: U component of the wind
v: V component of the wind
w: W component of the wind
"""
elev = np.deg2rad(elev)
az = np.deg2rad(az)
if vel.size > 1: # If there could be sufficient data points...
A = sum(vel * np.sin(az))
B = sum(np.sin(az) ** 2 * np.cos(elev))
C = sum(np.cos(az) * np.sin(az) * np.cos(elev))
G = sum(np.sin(az) * np.sin(elev))
D = sum(vel * np.cos(az))
E = sum(np.sin(az) * np.cos(az) * np.cos(elev))
F = sum(np.cos(az) ** 2 * np.cos(elev))
H = sum(np.cos(az) * np.sin(elev))
W = sum(vel)
X = sum(np.sin(az) * np.cos(elev))
Y = sum(np.cos(az) * np.cos(elev))
Z = sum(az * np.sin(elev))
# solve A = uB + vC + wG , D = uE + vF + wH and W = uX + vY+ wZ
y = np.array([[B, E, X], [C, F, Y], [G, H, Z]])
z = np.array([A, D, W])
# print y
# print z
try:
sol = np.linalg.solve(y, z)
# print sol
u = sol[0]
v = sol[1]
w = sol[2]
return u, v, w
except np.linalg.linalg.LinAlgError:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
else:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
def decode_header(header):
"""
Takes in a list of lines from the raw hpl file. Separates them by
tab and removes unnecessary text
"""
new_header = {}
for item in header:
split = item.split('\t')
new_header[split[0].replace(':', '')] = split[1].replace("\r\n", "")
return new_header
def _to_epoch(dt):
return (dt - datetime(1970, 1, 1)).total_seconds()
"""
process_file(in_file, out_dir, prefix):
Processes a raw halo hpl file and turns it into a netcdf
:param in_file:
:param out_dir:
:return:
"""
def writeVAD_to_nc(filename, date, elev, u, v, w, ws, wd, hgt, rmse, r_sq,up_flag,intensity):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'a', format="NETCDF4")
dim = nc.dimensions['t'].size
u_var = nc.variables['u']
v_var = nc.variables['v']
w_var = nc.variables['w']
ws_var = nc.variables['ws']
wd_var = nc.variables['wd']
rms_var = nc.variables['rms']
r_sq_var = nc.variables['r_sq']
time_var = nc.variables['time']
hgt_var = nc.variables['hgt']
up_flag_var = nc.variables['up_flag']
u_var[dim, :] = u
v_var[dim, :] = v
w_var[dim, :] = w
ws_var[dim, :] = ws
wd_var[dim, :] = wd
rms_var[dim, :] = rmse
r_sq_var[dim, :] = r_sq
up_flag_var[dim] = up_flag
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("elev", elev)
nc.setncattr("date", date.isoformat())
# Create the variables
u_var = nc.createVariable('u', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
v_var = nc.createVariable('v', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
w_var = nc.createVariable('w', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
ws_var = nc.createVariable('ws', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
wd_var = nc.createVariable('wd', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rms_var = nc.createVariable('rms', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
r_sq_var = nc.createVariable('r_sq', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
dim = nc.dimensions['t'].size
u_var[dim, :] = np.where(np.isnan(u), FILL_VALUE, u)
v_var[dim, :] = np.where(np.isnan(v), FILL_VALUE, v)
w_var[dim, :] = np.where(np.isnan(w), FILL_VALUE, w)
ws_var[dim, :] = np.where(np.isnan(ws), FILL_VALUE, ws)
wd_var[dim, :] = np.where(np.isnan(wd), FILL_VALUE, wd)
hgt_var[dim, :] = np.where(np.isnan(hgt), FILL_VALUE, hgt)
rms_var[dim, :] = np.where(np.isnan(rmse), FILL_VALUE, rmse)
r_sq_var[dim, :] = np.where(np.isnan(r_sq), FILL_VALUE, r_sq)
time_var[dim] = (date - datetime(1970, 1, 1)).total_seconds()
up_flag_var[dim]=up_flag
intensity_var[dim] = intensity
# Close the netcdf
nc.close()
def writeSTARE_to_nc(filename, date, w, hgt, intensity):
logging.debug(filename)
logging.debug(date)
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('hgt', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("date", date[0].isoformat())
# Create the variables
w_var = nc.createVariable('w', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
hgt_var[:] = hgt
time_var[:] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
w_var[:, :] = w
intensity_var[:] = intensity
# Close the netcdf
nc.close()
def writeRHI_to_nc(filename, date, vel, rng, elev, az, intensity,up_flag):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'r+', format="NETCDF4")
dim = nc.dimensions['t'].size
vel_var = nc.variables['velocity']
rng_var = nc.variables['range']
elev_var = nc.variables['elevation']
az_var = nc.variables['azimuth']
intensity_var = nc.variables['intensity']
time_var = nc.variables['time']
up_flag_var = nc.variables['up_flag']
# vel_var[dim, :] = vel
# rng_var[:] = rng
# elev_var[dim] = elev
# az_var[dim] = az
# intensity_var[dim, :] = intensity
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(rng))
nc.createDimension('t', None)
# Create the variables
vel_var = nc.createVariable('velocity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rng_var = nc.createVariable('range', 'f8', ('height'), fill_value=FILL_VALUE)
elev_var = nc.createVariable('elevation', 'f8', ('t'), fill_value=FILL_VALUE)
az_var = nc.createVariable('azimuth', 'f8', ('t'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
dim = nc.dimensions['t'].size
dim2 = dim+len(date)
vel_var[dim:dim2, :] = vel
rng_var[:] = np.where(np.isnan(rng), FILL_VALUE, rng)
elev_var[dim:dim2] = np.where(np.isnan(elev), FILL_VALUE, elev)
az_var[dim:dim2] = np.where(np.isnan(az), FILL_VALUE, az)
intensity_var[dim:dim2, :] = np.where(np.isnan(intensity), FILL_VALUE, intensity)
time_var[dim:dim2] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
#print [up_flag for i in range(len(date))]
up_flag_var[dim:dim2] = [float(up_flag) for i in range(len(date))]
# Close the netcdf
nc.close()
#########PROCESS CODE####################
#######deployment logging
# reads config file in TORUS_DL/logs/config.js
# writesout changes/events to deployment log in TORUS_DL/logs/log_MMDDYY.txt
# grab current timestamp for run
now = datetime.utcnow()
#now=datetime(2019,05,17,23,58)
log_time = now.strftime("%m%d%y_%H%M")
today = now.strftime("%Y%m%d") # - commented out for test date below.
# today_l = datetime(2019,05,07)
# today=today_l.strftime("%Y%m%d")
# now = datetime(year=2019, month=5, day=7, hour=16, minute=15)
# open and read in config file info
config = open('/Users/elizabethsmith/TORUS_DL/logs/config.js')
logdata = json.load(config)
config.close()
if logdata["status"]=='up':
print "we're up :)"
up_flag=1
if logdata["status"]=='down':
print "we're down :( "
up_flag=0
# TB - Running into an error here when starting a new log file.
# Added some automation to make your like easier
if os.path.exists('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"]):
# open and read most recent logfile entry
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "r+")
lines = current_logfile.readlines()
prev_status = lines[-6][8:-1] # reading logfile previous status (skipping text)
prev_heading = lines[-5][9:-1] # reading logfile previous heading (skipping text)
prev_lat = lines[-4][5:-1] # reading logfile previous lat (skipping text)
prev_lon = lines[-3][5:-1] # reading logfile previous lat (skipping text)
prev_note = lines[-2][6:-1] # reading logfile previous note (skipping text)
# check if the previous log entry matches the data in the config file..
print logdata["note"], prev_note
if (str(logdata["status"]) != prev_status or str(logdata["heading"]) != prev_heading or
str(logdata["lat"]) != prev_lat or str(logdata["lon"]) != prev_lon or
str(logdata["note"]) != prev_note):
print '**CONFIG FILE HAS BEEN UPDATED!** generating log entry...'
# generate writeout for new log entry based on config file.
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
else:
print "--no config changes"
current_logfile.close()
else:
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "w")
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
# get list of exisiting processed scans
path_proc = '/Users/elizabethsmith/TORUS_DL/data/nonQA_proc/dl/2019/201905/' + today + '/'
# check to make sure the output dir exists
try:
os.makedirs(path_proc)
except OSError:
logging.debug("Output path already exists...")
# Check to make sure the processed_files.txt exists
if not os.path.exists(path_proc + 'processed_files.txt'):
os.system('touch {}'.format(path_proc + 'processed_files.txt'))
# Open the processed files list and read it in
proc_list = open(path_proc + 'processed_files.txt', "r+")
proc_files = proc_list.readlines()
proc_list.close()
# Be sure to add the files that are processed to the running list
# get list of existing raw scans - always do the stare
# TB - I changed some things here so only the scans from the current hour are even looked at.
# - This cuts down on processing for the stare files!
path_raw = now.strftime('/Users/elizabethsmith/TORUS_DL/data/raw/dl/%Y/%Y%m/%Y%m%d/*%Y%m%d_*.hpl')
#print path_raw
raw_files = [f for f in glob(path_raw)]
#print raw_files
raw_files=sorted(raw_files)
# Process the scans
for in_file in raw_files:
# TB - I changed your logic for finding the files to process. This is a little easier and less prone to bugs
# Check to see if the file is in the alreasy processed files. If it is, skip it.
if in_file+'\n' in proc_files:
logging.debug("{} already processed".format(in_file))
continue
else:
logging.info("Processing {}".format(in_file))
# read in new scan
out_dir = path_proc
prefix = 'nonQA'
# Read in the text file
lines = []
with open(in_file) as f:
for line in f:
|
logging.debug("Decoding header")
# Read in the header info
header = decode_header(lines[0:11])
ngates = int(header['Number of gates'])
# nrays = int(header['No. of rays in file']) # Cant do this apparently. Not always correct (wtf)
len_data = len(lines[17:])
nrays = len_data / (ngates + 1)
gate_length = float(header['Range gate length (m)'])
start_time = datetime.strptime(header['Start time'], '%Y%m%d %H:%M:%S.%f')
scan_type = None
logging.debug("Reading data")
# Read in the actual data
az = np.zeros(nrays)
hour = np.zeros(nrays)
elev = np.zeros(nrays)
pitch = np.zeros(nrays)
roll = np.zeros(nrays)
rng = np.asarray([(gate + .5) * gate_length for gate in range(ngates)])
vel = np.zeros((ngates, nrays))
intensity = np.zeros((ngates, nrays))
beta = np.zeros((ngates, nrays))
try:
for ray in range(nrays):
# Get the scan info
info = lines[ray * (ngates + 1) + 17].split()
hour[ray] = float(info[0])
az[ray] = float(info[1])
elev[ray] = float(info[2])
pitch[ray] = float(info[3])
roll[ray] = float(info[4])
for gate in range(ngates):
data = lines[ray * (ngates + 1) + 17 + gate + 1].split()
vel[gate, ray] = float(data[1])
intensity[gate, ray] = float(data[2])
beta[gate, ray] = float(data[3])
except IndexError:
logging.warning("Something went wrong with the indexing here...")
# correction for some rounding/hysteresis in scanner azimuths... setting all vals==360. to 0.
az[np.where(az == 360.)] = 0.
# dynamic identification of lidar scan type (fp,ppi,rhi)
# TB - I Added the round here. Was getting a fp file ID'd as a rhi file
# - Also had an issue with and RHI file where az[0] was 0.01 and az[2] was 0
try:
if np.round(az[0], 1) == np.round(az[2], 1): # const azimuth could be RHI or stare
if np.round(elev[0], 1) == np.round(elev[2], 1): # const azimuth and constant elev = stare
scan_type = 'fp'
else: # const azimuth and non-constant elev = RHI
scan_type = 'rhi'
elif np.round(elev[0], 1) == np.round(elev[2]): # changing azimuth, const elev = PPI
scan_type = 'ppi'
if scan_type == None:
raise IndexError
logging.info("Scan Type: " + scan_type)
except IndexError:
logging.warning("Something went wrong with scan type IDing...")
if scan_type == 'ppi':
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
hgt = []
u = []
v = []
w = []
rmse = []
r_sq = []
for i, rng in enumerate(rng):
# Get the required stuff for this range ring
cnr = intensity[i, :] # range,azimuth
Vel = vel[i, :] # range,azimuth
Az = az # 8-terms of az
Elev = elev # 8-terms of az
# Filter out the bad values based on CNR - default was 1.015
Az = np.where(cnr <= 1.01, FILL_VALUE, Az)
Vel = np.where(cnr <= 1.01, FILL_VALUE, Vel)
Az = list_to_masked_array(Az, FILL_VALUE)
Vel = list_to_masked_array(Vel, FILL_VALUE)
# Calculate the vad and height for this range ring
tmp_u, tmp_v, tmp_w = calc_vad_3d(Az, Elev, Vel) # grab this to it can point to it!!!
# Calculate the RMSE
N = float(Vel.size)
az_rad = np.deg2rad(Az)
elev_rad = np.deg2rad(Elev)
derived_vr = (np.sin(az_rad) * np.cos(elev_rad) * tmp_u) + \
(np.cos(az_rad) * np.cos(elev_rad) * tmp_v) + \
(np.sin(elev_rad) * tmp_w)
tmp_E = Vel - derived_vr
# Calculate rms error
tmp_RMSE = np.sqrt(1 / N * np.sum(tmp_E ** 2))
tmp_r_sq = calc_homogeneity(Vel, derived_vr)
# Append to the lists for plotting
u.append(tmp_u)
v.append(tmp_v)
w.append(tmp_w)
hgt.append(ray_height(rng, Elev[0]))
rmse.append(tmp_RMSE)
r_sq.append(tmp_r_sq)
vector_wind = rotate(u, v, w, logdata["heading"], 0, 0)
vector_wind = vector_wind.squeeze()
u = vector_wind[:, 0]
v = vector_wind[:, 1]
w = vector_wind[:, 2]
ws = wind_uv_to_spd(u, v)
wd = wind_uv_to_dir(u, v)
writeVAD_to_nc(path_proc + prefix + date.strftime('%Y%m%d') + '_VAD.nc', date, elev, u, v, w, ws, wd, hgt,
rmse, r_sq,up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
if scan_type == 'fp':
# TB - I decided that it is best to just process the entire stare file every time
# instead of try to append to the netcdf. This shouldn't hinder processeing time that much
# since I changed things to only grab things from the same hour you're processing
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
# Filter out the bad values based on CNR
Vel = np.where(intensity <= 1.01, FILL_VALUE, vel)
# Get the rng into a 2d array
rng = np.array([rng for i in range(len(rng))])
logging.debug("Writing stare file")
writeSTARE_to_nc(path_proc+prefix+date.strftime('%Y%m%d_%H_STARE.nc'), times, vel.transpose(), rng, up_flag)
if scan_type=='rhi':
# TB - A quick tip: Don't do an RHI at az=0. It bounces between 0 and 360 and is a pain in the ass to process
# - Just do it at like 1 deg. or even .1
# TB - Note to self - need to do a heading correction on this one.
date = start_time
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
filename = path_proc + prefix + date.strftime('%Y%m%d_%H') + '_RHI.nc'
# break
writeRHI_to_nc(filename, times, vel.transpose(), rng, elev, az, intensity.transpose(), up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
| lines.append(line) | conditional_block |
process_test.py | import json
import netCDF4
import numpy as np
import logging
import os
import argparse
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from glob import glob
# from pyclamps.utils import list_to_masked_array, ray_height
# from pyclamps.vad import calc_vad_3d, calc_homogeneity
# Global Values
FILL_VALUE = -9999.
VEL_LIM = (-30, 30)
HGT_LIM = (0, 1000)
PROFILES_PER_PLOT = 2
Re = 6371000
R43 = Re * 4.0 / 3.0
# python logging NOT deployment logging
# logging.basicConfig(filename='DLprocessing.log', level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(message)s')
logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')
# some needed fucntions
def wind_uv_to_dir(U, V):
    """
    Meteorological wind direction (degrees, [0, 360)) from u/v components.

    Inputs:
        U = west/east component (wind from the west is positive)
        V = south/north component (wind from the south is positive)

    The math-convention angle from arctan2 is converted to the
    meteorological convention (direction the wind blows FROM, measured
    clockwise from north); the final modulo maps an exact 360 back to 0.
    """
    trig_deg = np.rad2deg(np.arctan2(V, U))
    return (270.0 - trig_deg) % 360.0
def wind_uv_to_spd(U, V):
    """
    Wind speed (vector magnitude) from the u and v wind components.

    Inputs:
        U = west/east component (wind from the west is positive)
        V = south/north component (wind from the south is positive)
    """
    speed = np.sqrt(U ** 2 + V ** 2)
    return speed
def list_to_masked_array(in_list, mask_value):
    """Convert a sequence to a numpy masked array, masking every entry equal to mask_value."""
    values = np.array(in_list)
    bad = values == mask_value
    return np.ma.masked_where(bad, values)
def ray_height(rng, elev, H0=0, R1=R43):
    """
    Height of the radar beam center above the surface.

    Rinehart (1997) Eqn 3.12; Bech et al. (2003) Eqn 3.

    INPUT::
    -----
    rng : float
        Range from radar to point of interest [m]
    elev : float
        Elevation angle of radar beam [deg]
    H0 : float
        Height of radar antenna [m]
    R1 : float
        Effective earth radius [m]; defaults to the 4/3 "standard
        atmosphere" approximation (Bech et al. 2003, Eqn 4).

    OUTPUT::
    -----
    H : float
        Radar beam height [m]

    USAGE::
    -----
    H = ray_height(r, elev, H0, [R1=6374000.*4/3])
    """
    elev_rad = np.deg2rad(elev)
    slant = np.sqrt(rng ** 2 + R1 ** 2 + 2 * rng * R1 * np.sin(elev_rad))
    return slant - R1 + H0
def rotate(u, v, w, yaw, pitch, roll):
    '''
    Rotate the wind vector(s) (u, v, w) through the given yaw/pitch/roll.

    :param u: U component of the wind
    :param v: V component of the wind
    :param w: W component of the wind
    :param yaw: Rotation about the Z axis
    :param pitch: Rotation about the X axis
    :param roll: Rotation about the Y axis
    :return:
        result: 3D array of the new U, V, and W fields after the rotation

    NOTE(review): np.cos/np.sin expect RADIANS, but the caller in this file
    passes logdata["heading"], which looks like a compass heading in
    degrees -- confirm the units before trusting rotated output.
    '''
    # Combined yaw-pitch-roll rotation matrix built element-by-element.
    rot_matrix = np.asarray(
        [[np.cos(yaw) * np.cos(pitch), np.cos(yaw) * np.sin(pitch) * np.sin(roll) - np.sin(yaw) * np.cos(roll),
          np.cos(yaw) * np.sin(pitch) * np.cos(roll) + np.sin(yaw) * np.sin(roll)],
         [np.sin(yaw) * np.cos(pitch), np.sin(yaw) * np.sin(pitch) * np.sin(roll) + np.cos(yaw) * np.cos(roll),
          np.sin(yaw) * np.sin(pitch) * np.cos(roll) - np.cos(yaw) * np.sin(roll)],
         [-np.sin(pitch), np.cos(pitch) * np.sin(roll), np.cos(pitch) * np.cos(roll)]])
    # Stack the components as row vector(s) and apply the rotation.
    vel_matrix = np.asarray([[u], [v], [w]]).transpose()
    result = np.dot(vel_matrix, rot_matrix)
    return result
def calc_homogeneity(raw_vr, derived_vr):
    """
    Homogeneity (coefficient of determination, R^2) of the wind field as
    described in E. Paschke et. al. 2015 section 2.2.4.

    :param raw_vr: Raw radial velocity samples
    :param derived_vr: Radial velocity derived from the wind retrieval
    :return: 1 - SS_res / SS_tot; 1.0 means the retrieval reproduces the
             raw radial velocities exactly.
    """
    # BUG FIX: the reference value "vr_bar" must be the MEAN radial
    # velocity, not the sum -- R^2 uses deviations from the average.
    vr_bar = np.mean(raw_vr)
    return 1 - np.sum((raw_vr - derived_vr) ** 2) / np.sum((raw_vr - vr_bar) ** 2)
def calc_vad_3d(az, elev, vel):
    """
    3D velocity-azimuth display (VAD) wind retrieval.

    Solves the system obtained by multiplying the radial-velocity model
        vel = u*sin(az)*cos(elev) + v*cos(az)*cos(elev) + w*sin(elev)
    by sin(az), by cos(az), and by 1, then summing over all rays:
        A = u*B + v*C + w*G
        D = u*E + v*F + w*H
        W = u*X + v*Y + w*Z

    :param az: Azimuth data [deg]
    :param elev: Elevation data [deg]
    :param vel: Radial velocity data
    :return:
        (u, v, w) wind components, or (FILL_VALUE, FILL_VALUE, FILL_VALUE)
        when there are too few points or the system is singular.
    """
    elev = np.deg2rad(elev)
    az = np.deg2rad(az)
    if vel.size > 1:  # If there could be sufficient data points...
        A = sum(vel * np.sin(az))
        B = sum(np.sin(az) ** 2 * np.cos(elev))
        C = sum(np.cos(az) * np.sin(az) * np.cos(elev))
        G = sum(np.sin(az) * np.sin(elev))
        D = sum(vel * np.cos(az))
        E = sum(np.sin(az) * np.cos(az) * np.cos(elev))
        F = sum(np.cos(az) ** 2 * np.cos(elev))
        H = sum(np.cos(az) * np.sin(elev))
        W = sum(vel)
        X = sum(np.sin(az) * np.cos(elev))
        Y = sum(np.cos(az) * np.cos(elev))
        # BUG FIX: Z is the coefficient of w in W = u*X + v*Y + w*Z, i.e.
        # sum(sin(elev)); the old code summed az * sin(elev).
        Z = sum(np.sin(elev))
        # BUG FIX: matrix rows must follow the equations above; the old code
        # built the transpose, which only coincides when the matrix happens
        # to be symmetric (it is not for varying elevation).
        y = np.array([[B, C, G], [E, F, H], [X, Y, Z]])
        z = np.array([A, D, W])
        try:
            sol = np.linalg.solve(y, z)
            u = sol[0]
            v = sol[1]
            w = sol[2]
            return u, v, w
        # np.linalg.linalg was a private alias removed in NumPy 2.0.
        except np.linalg.LinAlgError:
            return FILL_VALUE, FILL_VALUE, FILL_VALUE
    else:
        return FILL_VALUE, FILL_VALUE, FILL_VALUE
def decode_header(header):
    """
    Parse raw hpl header lines into a dict.

    Each line looks like "Key:\t<value>\r\n"; the colon is stripped from
    the key and the trailing CRLF from the value.
    """
    parsed = {}
    for line in header:
        fields = line.split('\t')
        key = fields[0].replace(':', '')
        parsed[key] = fields[1].replace("\r\n", "")
    return parsed
def _to_epoch(dt):
return (dt - datetime(1970, 1, 1)).total_seconds()
"""
process_file(in_file, out_dir, prefix):
Processes a raw halo hpl file and turns it into a netcdf
:param in_file:
:param out_dir:
:return:
"""
def writeVAD_to_nc(filename, date, elev, u, v, w, ws, wd, hgt, rmse, r_sq,up_flag,intensity):
    # Write (or append) one VAD wind profile to a daily netCDF file.
    #
    # filename  : output netCDF path; appended to if it already exists
    # date      : profile time (naive datetime; the epoch math below implies UTC)
    # elev      : scan elevation angle, stored as a global attribute
    # u, v, w   : wind components per height bin
    # ws, wd    : wind speed / direction per height bin
    # hgt       : beam heights per bin
    # rmse, r_sq: fit RMS error / homogeneity per bin
    # up_flag   : deployment status flag (scalar per profile)
    # intensity : backscatter intensity per bin
    #
    # NOTE(review): the call site in this file passes only 12 positional
    # arguments (no intensity), which would raise a TypeError against this
    # 13-parameter signature -- confirm which side is stale.
    if os.path.exists(filename):
        # open the netcdf
        nc = netCDF4.Dataset(filename, 'a', format="NETCDF4")
        dim = nc.dimensions['t'].size
        u_var = nc.variables['u']
        v_var = nc.variables['v']
        w_var = nc.variables['w']
        ws_var = nc.variables['ws']
        wd_var = nc.variables['wd']
        rms_var = nc.variables['rms']
        r_sq_var = nc.variables['r_sq']
        time_var = nc.variables['time']
        hgt_var = nc.variables['hgt']
        up_flag_var = nc.variables['up_flag']
        # NOTE(review): these writes grow the unlimited 't' dimension by one;
        # `dim` is then recomputed below and the same fields are written again
        # at the next index, so each append appears to produce two time
        # entries. Also `intensity_var` is never fetched in this branch but is
        # assigned in the shared block below (NameError on append). Verify.
        u_var[dim, :] = u
        v_var[dim, :] = v
        w_var[dim, :] = w
        ws_var[dim, :] = ws
        wd_var[dim, :] = wd
        rms_var[dim, :] = rmse
        r_sq_var[dim, :] = r_sq
        up_flag_var[dim] = up_flag
    else:
        # Create the netcdf
        nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
        # Create the height dimension
        nc.createDimension('height', len(hgt))
        nc.createDimension('t', None)
        # Add the attributes
        nc.setncattr("elev", elev)
        nc.setncattr("date", date.isoformat())
        # Create the variables
        u_var = nc.createVariable('u', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        v_var = nc.createVariable('v', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        w_var = nc.createVariable('w', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        ws_var = nc.createVariable('ws', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        wd_var = nc.createVariable('wd', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        hgt_var = nc.createVariable('hgt', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        rms_var = nc.createVariable('rms', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        r_sq_var = nc.createVariable('r_sq', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
        # NOTE(review): 'intensity' is created on a 'hgt' dimension, but this
        # file only defines 'height' -- confirm this does not fail at create
        # time.
        intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
        time_var = nc.createVariable('time', 'i8', ('t'))
        time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
    # Shared writes for both branches; NaNs are replaced with FILL_VALUE.
    dim = nc.dimensions['t'].size
    u_var[dim, :] = np.where(np.isnan(u), FILL_VALUE, u)
    v_var[dim, :] = np.where(np.isnan(v), FILL_VALUE, v)
    w_var[dim, :] = np.where(np.isnan(w), FILL_VALUE, w)
    ws_var[dim, :] = np.where(np.isnan(ws), FILL_VALUE, ws)
    wd_var[dim, :] = np.where(np.isnan(wd), FILL_VALUE, wd)
    hgt_var[dim, :] = np.where(np.isnan(hgt), FILL_VALUE, hgt)
    rms_var[dim, :] = np.where(np.isnan(rmse), FILL_VALUE, rmse)
    r_sq_var[dim, :] = np.where(np.isnan(r_sq), FILL_VALUE, r_sq)
    time_var[dim] = (date - datetime(1970, 1, 1)).total_seconds()
    up_flag_var[dim]=up_flag
    intensity_var[dim] = intensity
    # Close the netcdf
    nc.close()
def writeSTARE_to_nc(filename, date, w, hgt, intensity):
    # Write a full vertical-stare file. Always created from scratch
    # (mode 'w'): the whole stare file is reprocessed every run.
    #
    # filename : output netCDF path (overwritten if present)
    # date     : sequence of datetimes, one per ray
    # w        : vertical velocity, shape (time, height)
    # hgt      : heights for the 'hgt' dimension
    # intensity: backscatter intensity
    #
    # NOTE(review): the call site in this file passes
    # (times, vel.transpose(), rng, up_flag), so `intensity` actually
    # receives up_flag -- confirm the intended argument order before
    # trusting the stored 'intensity' variable.
    logging.debug(filename)
    logging.debug(date)
    # Create the netcdf
    nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
    # Create the height dimension
    nc.createDimension('hgt', len(hgt))
    nc.createDimension('t', None)
    # Add the attributes
    nc.setncattr("date", date[0].isoformat())
    # Create the variables
    w_var = nc.createVariable('w', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
    hgt_var = nc.createVariable('hgt', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
    intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
    time_var = nc.createVariable('time', 'i8', ('t'))
    time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
    hgt_var[:] = hgt
    # Times are stored as seconds since the Unix epoch.
    time_var[:] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
    w_var[:, :] = w
    intensity_var[:] = intensity
    # Close the netcdf
    nc.close()
def writeRHI_to_nc(filename, date, vel, rng, elev, az, intensity,up_flag):
    # Write (or append) an RHI scan to an hourly netCDF file.
    #
    # filename : output path; appended to if it already exists
    # date     : sequence of datetimes, one per ray
    # vel      : radial velocity, shape (time, height)
    # rng      : gate ranges [m]
    # elev, az : per-ray elevation / azimuth [deg]
    # intensity: backscatter intensity, shape (time, height)
    # up_flag  : deployment status flag, replicated across the appended rays
    if os.path.exists(filename):
        # Append: reopen and fetch the existing variables; the actual
        # writes happen in the shared block below.
        nc = netCDF4.Dataset(filename, 'r+', format="NETCDF4")
        dim = nc.dimensions['t'].size
        vel_var = nc.variables['velocity']
        rng_var = nc.variables['range']
        elev_var = nc.variables['elevation']
        az_var = nc.variables['azimuth']
        intensity_var = nc.variables['intensity']
        time_var = nc.variables['time']
        up_flag_var = nc.variables['up_flag']
        # vel_var[dim, :] = vel
        # rng_var[:] = rng
        # elev_var[dim] = elev
        # az_var[dim] = az
        # intensity_var[dim, :] = intensity
    else:
        # Create the netcdf
        nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
        # Create the height dimension
        nc.createDimension('height', len(rng))
        nc.createDimension('t', None)
        # Create the variables
        vel_var = nc.createVariable('velocity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        rng_var = nc.createVariable('range', 'f8', ('height'), fill_value=FILL_VALUE)
        elev_var = nc.createVariable('elevation', 'f8', ('t'), fill_value=FILL_VALUE)
        az_var = nc.createVariable('azimuth', 'f8', ('t'), fill_value=FILL_VALUE)
        intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
        time_var = nc.createVariable('time', 'i8', ('t'))
        time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
    # Shared writes: append len(date) new rays at the end of the unlimited
    # 't' dimension. NaNs are replaced with FILL_VALUE.
    dim = nc.dimensions['t'].size
    dim2 = dim+len(date)
    vel_var[dim:dim2, :] = vel
    # NOTE(review): the full 'range' coordinate is rewritten on every append.
    rng_var[:] = np.where(np.isnan(rng), FILL_VALUE, rng)
    elev_var[dim:dim2] = np.where(np.isnan(elev), FILL_VALUE, elev)
    az_var[dim:dim2] = np.where(np.isnan(az), FILL_VALUE, az)
    intensity_var[dim:dim2, :] = np.where(np.isnan(intensity), FILL_VALUE, intensity)
    time_var[dim:dim2] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
    #print [up_flag for i in range(len(date))]
    up_flag_var[dim:dim2] = [float(up_flag) for i in range(len(date))]
    # Close the netcdf
    nc.close()
#########PROCESS CODE####################
#######deployment logging
# Reads the config file TORUS_DL/logs/config.js and writes out
# changes/events to the deployment log TORUS_DL/logs/log_MMDDYY.txt.
# NOTE(review): this section uses Python 2 print statements and hard-coded
# absolute paths under /Users/elizabethsmith -- it will not run unmodified
# under Python 3 or on another machine.
# grab current timestamp for run
now = datetime.utcnow()
#now=datetime(2019,05,17,23,58)
log_time = now.strftime("%m%d%y_%H%M")
today = now.strftime("%Y%m%d") # - commented out for test date below.
# today_l = datetime(2019,05,07)
# today=today_l.strftime("%Y%m%d")
# now = datetime(year=2019, month=5, day=7, hour=16, minute=15)
# open and read in config file info
config = open('/Users/elizabethsmith/TORUS_DL/logs/config.js')
logdata = json.load(config)
config.close()
# Map the config status string to the numeric up_flag stored in the netCDFs.
# NOTE(review): up_flag is left undefined if status is neither 'up' nor
# 'down' -- a later reference would raise NameError; confirm that is intended.
if logdata["status"]=='up':
    print "we're up :)"
    up_flag=1
if logdata["status"]=='down':
    print "we're down :( "
    up_flag=0
# TB - Running into an error here when starting a new log file.
# Added some automation to make your life easier
if os.path.exists('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"]):
    # open and read most recent logfile entry
    current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "r+")
    lines = current_logfile.readlines()
    # The last entry occupies the final 8 lines; slice off the field labels.
    prev_status = lines[-6][8:-1]  # reading logfile previous status (skipping text)
    prev_heading = lines[-5][9:-1]  # reading logfile previous heading (skipping text)
    prev_lat = lines[-4][5:-1]  # reading logfile previous lat (skipping text)
    prev_lon = lines[-3][5:-1]  # reading logfile previous lon (skipping text)
    prev_note = lines[-2][6:-1]  # reading logfile previous note (skipping text)
    # check if the previous log entry matches the data in the config file..
    print logdata["note"], prev_note
    if (str(logdata["status"]) != prev_status or str(logdata["heading"]) != prev_heading or
            str(logdata["lat"]) != prev_lat or str(logdata["lon"]) != prev_lon or
            str(logdata["note"]) != prev_note):
        print '**CONFIG FILE HAS BEEN UPDATED!** generating log entry...'
        # generate writeout for new log entry based on config file.
        writeout = ["*********ENTRY**************\n",
                    "timestamp: %s\n" % log_time,
                    "status: %s\n" % logdata["status"],
                    "heading: %s\n" % logdata["heading"],
                    "lat: %s\n" % logdata["lat"],
                    "lon: %s\n" % logdata["lon"],
                    "note: %s\n" % logdata["note"],
                    "*********END***************\n"]
        current_logfile.writelines(writeout)
        print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
    else:
        print "--no config changes"
    current_logfile.close()
else:
    # First run for this logfile: create it and write the initial entry.
    current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "w")
    writeout = ["*********ENTRY**************\n",
                "timestamp: %s\n" % log_time,
                "status: %s\n" % logdata["status"],
                "heading: %s\n" % logdata["heading"],
                "lat: %s\n" % logdata["lat"],
                "lon: %s\n" % logdata["lon"],
                "note: %s\n" % logdata["note"],
                "*********END***************\n"]
    current_logfile.writelines(writeout)
    print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
# get list of existing processed scans
path_proc = '/Users/elizabethsmith/TORUS_DL/data/nonQA_proc/dl/2019/201905/' + today + '/'
# check to make sure the output dir exists
try:
    os.makedirs(path_proc)
except OSError:
    logging.debug("Output path already exists...")
# Check to make sure the processed_files.txt exists
if not os.path.exists(path_proc + 'processed_files.txt'):
    os.system('touch {}'.format(path_proc + 'processed_files.txt'))
# Open the processed files list and read it in
proc_list = open(path_proc + 'processed_files.txt', "r+")
proc_files = proc_list.readlines()
proc_list.close()
# Be sure to add the files that are processed to the running list
# get list of existing raw scans - always do the stare
# TB - I changed some things here so only the scans from the current hour are even looked at.
#    - This cuts down on processing for the stare files!
path_raw = now.strftime('/Users/elizabethsmith/TORUS_DL/data/raw/dl/%Y/%Y%m/%Y%m%d/*%Y%m%d_*.hpl')
#print path_raw
raw_files = [f for f in glob(path_raw)]
#print raw_files
raw_files=sorted(raw_files)
# Process the scans
for in_file in raw_files:
# TB - I changed your logic for finding the files to process. This is a little easier and less prone to bugs
# Check to see if the file is in the alreasy processed files. If it is, skip it.
if in_file+'\n' in proc_files:
logging.debug("{} already processed".format(in_file))
continue
else:
logging.info("Processing {}".format(in_file))
# read in new scan
out_dir = path_proc
prefix = 'nonQA'
# Read in the text file
lines = []
with open(in_file) as f:
for line in f:
lines.append(line)
logging.debug("Decoding header")
# Read in the header info
header = decode_header(lines[0:11])
ngates = int(header['Number of gates'])
# nrays = int(header['No. of rays in file']) # Cant do this apparently. Not always correct (wtf)
len_data = len(lines[17:])
nrays = len_data / (ngates + 1)
gate_length = float(header['Range gate length (m)'])
start_time = datetime.strptime(header['Start time'], '%Y%m%d %H:%M:%S.%f')
scan_type = None
logging.debug("Reading data")
# Read in the actual data
az = np.zeros(nrays)
hour = np.zeros(nrays)
elev = np.zeros(nrays)
pitch = np.zeros(nrays)
roll = np.zeros(nrays)
rng = np.asarray([(gate + .5) * gate_length for gate in range(ngates)])
vel = np.zeros((ngates, nrays))
intensity = np.zeros((ngates, nrays))
beta = np.zeros((ngates, nrays))
try:
for ray in range(nrays):
# Get the scan info
info = lines[ray * (ngates + 1) + 17].split()
hour[ray] = float(info[0])
az[ray] = float(info[1])
elev[ray] = float(info[2])
pitch[ray] = float(info[3])
roll[ray] = float(info[4])
for gate in range(ngates):
data = lines[ray * (ngates + 1) + 17 + gate + 1].split()
vel[gate, ray] = float(data[1])
intensity[gate, ray] = float(data[2])
beta[gate, ray] = float(data[3])
except IndexError:
logging.warning("Something went wrong with the indexing here...")
# correction for some rounding/hysteresis in scanner azimuths... setting all vals==360. to 0.
az[np.where(az == 360.)] = 0.
# dynamic identification of lidar scan type (fp,ppi,rhi)
# TB - I Added the round here. Was getting a fp file ID'd as a rhi file
# - Also had an issue with and RHI file where az[0] was 0.01 and az[2] was 0
try:
if np.round(az[0], 1) == np.round(az[2], 1): # const azimuth could be RHI or stare
if np.round(elev[0], 1) == np.round(elev[2], 1): # const azimuth and constant elev = stare
scan_type = 'fp'
else: # const azimuth and non-constant elev = RHI
scan_type = 'rhi'
elif np.round(elev[0], 1) == np.round(elev[2]): # changing azimuth, const elev = PPI
scan_type = 'ppi'
if scan_type == None:
raise IndexError
logging.info("Scan Type: " + scan_type)
except IndexError:
logging.warning("Something went wrong with scan type IDing...")
if scan_type == 'ppi':
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
hgt = []
u = []
v = []
w = []
rmse = []
r_sq = []
for i, rng in enumerate(rng):
# Get the required stuff for this range ring
cnr = intensity[i, :] # range,azimuth
Vel = vel[i, :] # range,azimuth
Az = az # 8-terms of az
Elev = elev # 8-terms of az
# Filter out the bad values based on CNR - default was 1.015
Az = np.where(cnr <= 1.01, FILL_VALUE, Az)
Vel = np.where(cnr <= 1.01, FILL_VALUE, Vel)
Az = list_to_masked_array(Az, FILL_VALUE)
Vel = list_to_masked_array(Vel, FILL_VALUE)
# Calculate the vad and height for this range ring
tmp_u, tmp_v, tmp_w = calc_vad_3d(Az, Elev, Vel) # grab this to it can point to it!!!
# Calculate the RMSE
N = float(Vel.size)
az_rad = np.deg2rad(Az)
elev_rad = np.deg2rad(Elev)
derived_vr = (np.sin(az_rad) * np.cos(elev_rad) * tmp_u) + \
(np.cos(az_rad) * np.cos(elev_rad) * tmp_v) + \
(np.sin(elev_rad) * tmp_w)
tmp_E = Vel - derived_vr
# Calculate rms error
tmp_RMSE = np.sqrt(1 / N * np.sum(tmp_E ** 2))
tmp_r_sq = calc_homogeneity(Vel, derived_vr)
# Append to the lists for plotting
u.append(tmp_u)
v.append(tmp_v)
w.append(tmp_w)
hgt.append(ray_height(rng, Elev[0]))
rmse.append(tmp_RMSE)
r_sq.append(tmp_r_sq)
vector_wind = rotate(u, v, w, logdata["heading"], 0, 0)
vector_wind = vector_wind.squeeze()
u = vector_wind[:, 0]
v = vector_wind[:, 1]
w = vector_wind[:, 2]
ws = wind_uv_to_spd(u, v)
wd = wind_uv_to_dir(u, v)
writeVAD_to_nc(path_proc + prefix + date.strftime('%Y%m%d') + '_VAD.nc', date, elev, u, v, w, ws, wd, hgt,
rmse, r_sq,up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
if scan_type == 'fp':
# TB - I decided that it is best to just process the entire stare file every time
# instead of try to append to the netcdf. This shouldn't hinder processeing time that much
# since I changed things to only grab things from the same hour you're processing
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
# Filter out the bad values based on CNR
Vel = np.where(intensity <= 1.01, FILL_VALUE, vel)
# Get the rng into a 2d array
rng = np.array([rng for i in range(len(rng))])
logging.debug("Writing stare file")
writeSTARE_to_nc(path_proc+prefix+date.strftime('%Y%m%d_%H_STARE.nc'), times, vel.transpose(), rng, up_flag)
if scan_type=='rhi':
# TB - A quick tip: Don't do an RHI at az=0. It bounces between 0 and 360 and is a pain in the ass to process
# - Just do it at like 1 deg. or even .1
# TB - Note to self - need to do a heading correction on this one.
date = start_time
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
filename = path_proc + prefix + date.strftime('%Y%m%d_%H') + '_RHI.nc'
# break |
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close() | writeRHI_to_nc(filename, times, vel.transpose(), rng, elev, az, intensity.transpose(), up_flag) | random_line_split |
process_test.py | import json
import netCDF4
import numpy as np
import logging
import os
import argparse
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from glob import glob
# from pyclamps.utils import list_to_masked_array, ray_height
# from pyclamps.vad import calc_vad_3d, calc_homogeneity
# Global Values
FILL_VALUE = -9999.
VEL_LIM = (-30, 30)
HGT_LIM = (0, 1000)
PROFILES_PER_PLOT = 2
Re = 6371000
R43 = Re * 4.0 / 3.0
# python logging NOT deployment logging
# logging.basicConfig(filename='DLprocessing.log', level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(message)s')
logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')
# some needed fucntions
def wind_uv_to_dir(U, V):
    """
    Meteorological wind direction (degrees, [0, 360)) from u/v components.

    Inputs:
        U = west/east component (wind from the west is positive)
        V = south/north component (wind from the south is positive)

    The math-convention angle from arctan2 is converted to the
    meteorological convention (direction the wind blows FROM, measured
    clockwise from north); the final modulo maps an exact 360 back to 0.
    """
    trig_deg = np.rad2deg(np.arctan2(V, U))
    return (270.0 - trig_deg) % 360.0
def wind_uv_to_spd(U, V):
    """
    Wind speed (vector magnitude) from the u and v wind components.

    Inputs:
        U = west/east component (wind from the west is positive)
        V = south/north component (wind from the south is positive)
    """
    speed = np.sqrt(U ** 2 + V ** 2)
    return speed
def list_to_masked_array(in_list, mask_value):
    """Convert a sequence to a numpy masked array, masking every entry equal to mask_value."""
    values = np.array(in_list)
    bad = values == mask_value
    return np.ma.masked_where(bad, values)
def ray_height(rng, elev, H0=0, R1=R43):
    """
    Height of the radar beam center above the surface.

    Rinehart (1997) Eqn 3.12; Bech et al. (2003) Eqn 3.

    INPUT::
    -----
    rng : float
        Range from radar to point of interest [m]
    elev : float
        Elevation angle of radar beam [deg]
    H0 : float
        Height of radar antenna [m]
    R1 : float
        Effective earth radius [m]; defaults to the 4/3 "standard
        atmosphere" approximation (Bech et al. 2003, Eqn 4).

    OUTPUT::
    -----
    H : float
        Radar beam height [m]

    USAGE::
    -----
    H = ray_height(r, elev, H0, [R1=6374000.*4/3])
    """
    elev_rad = np.deg2rad(elev)
    slant = np.sqrt(rng ** 2 + R1 ** 2 + 2 * rng * R1 * np.sin(elev_rad))
    return slant - R1 + H0
def rotate(u, v, w, yaw, pitch, roll):
    '''
    Rotate the wind vector(s) (u, v, w) through the given yaw/pitch/roll.

    :param u: U component of the wind
    :param v: V component of the wind
    :param w: W component of the wind
    :param yaw: Rotation about the Z axis
    :param pitch: Rotation about the X axis
    :param roll: Rotation about the Y axis
    :return:
        result: 3D array of the new U, V, and W fields after the rotation

    NOTE(review): np.cos/np.sin expect RADIANS, but the caller in this file
    passes logdata["heading"], which looks like a compass heading in
    degrees -- confirm the units before trusting rotated output.
    '''
    # Combined yaw-pitch-roll rotation matrix built element-by-element.
    rot_matrix = np.asarray(
        [[np.cos(yaw) * np.cos(pitch), np.cos(yaw) * np.sin(pitch) * np.sin(roll) - np.sin(yaw) * np.cos(roll),
          np.cos(yaw) * np.sin(pitch) * np.cos(roll) + np.sin(yaw) * np.sin(roll)],
         [np.sin(yaw) * np.cos(pitch), np.sin(yaw) * np.sin(pitch) * np.sin(roll) + np.cos(yaw) * np.cos(roll),
          np.sin(yaw) * np.sin(pitch) * np.cos(roll) - np.cos(yaw) * np.sin(roll)],
         [-np.sin(pitch), np.cos(pitch) * np.sin(roll), np.cos(pitch) * np.cos(roll)]])
    # Stack the components as row vector(s) and apply the rotation.
    vel_matrix = np.asarray([[u], [v], [w]]).transpose()
    result = np.dot(vel_matrix, rot_matrix)
    return result
def calc_homogeneity(raw_vr, derived_vr):
    """
    Homogeneity (coefficient of determination, R^2) of the wind field as
    described in E. Paschke et. al. 2015 section 2.2.4.

    :param raw_vr: Raw radial velocity samples
    :param derived_vr: Radial velocity derived from the wind retrieval
    :return: 1 - SS_res / SS_tot; 1.0 means the retrieval reproduces the
             raw radial velocities exactly.
    """
    # BUG FIX: the reference value "vr_bar" must be the MEAN radial
    # velocity, not the sum -- R^2 uses deviations from the average.
    vr_bar = np.mean(raw_vr)
    return 1 - np.sum((raw_vr - derived_vr) ** 2) / np.sum((raw_vr - vr_bar) ** 2)
def calc_vad_3d(az, elev, vel):
    """
    3D velocity-azimuth display (VAD) wind retrieval.

    Solves the system obtained by multiplying the radial-velocity model
        vel = u*sin(az)*cos(elev) + v*cos(az)*cos(elev) + w*sin(elev)
    by sin(az), by cos(az), and by 1, then summing over all rays:
        A = u*B + v*C + w*G
        D = u*E + v*F + w*H
        W = u*X + v*Y + w*Z

    :param az: Azimuth data [deg]
    :param elev: Elevation data [deg]
    :param vel: Velocity data
    :return:
        (u, v, w) wind components, or (FILL_VALUE, FILL_VALUE, FILL_VALUE)
        when there are too few points or the system is singular.
    """
    elev = np.deg2rad(elev)
    az = np.deg2rad(az)
    if vel.size > 1:  # If there could be sufficient data points...
        A = sum(vel * np.sin(az))
        B = sum(np.sin(az) ** 2 * np.cos(elev))
        C = sum(np.cos(az) * np.sin(az) * np.cos(elev))
        G = sum(np.sin(az) * np.sin(elev))
        D = sum(vel * np.cos(az))
        E = sum(np.sin(az) * np.cos(az) * np.cos(elev))
        F = sum(np.cos(az) ** 2 * np.cos(elev))
        H = sum(np.cos(az) * np.sin(elev))
        W = sum(vel)
        X = sum(np.sin(az) * np.cos(elev))
        Y = sum(np.cos(az) * np.cos(elev))
        # BUG FIX: Z is the coefficient of w in W = u*X + v*Y + w*Z, i.e.
        # sum(sin(elev)); the old code summed az * sin(elev).
        Z = sum(np.sin(elev))
        # BUG FIX: matrix rows must follow the equations above; the old code
        # built the transpose, which only coincides when the matrix happens
        # to be symmetric (it is not for varying elevation).
        y = np.array([[B, C, G], [E, F, H], [X, Y, Z]])
        z = np.array([A, D, W])
        try:
            sol = np.linalg.solve(y, z)
            u = sol[0]
            v = sol[1]
            w = sol[2]
            return u, v, w
        # np.linalg.linalg was a private alias removed in NumPy 2.0.
        except np.linalg.LinAlgError:
            return FILL_VALUE, FILL_VALUE, FILL_VALUE
    else:
        return FILL_VALUE, FILL_VALUE, FILL_VALUE
def | (header):
"""
Takes in a list of lines from the raw hpl file. Separates them by
tab and removes unnecessary text
"""
new_header = {}
for item in header:
split = item.split('\t')
new_header[split[0].replace(':', '')] = split[1].replace("\r\n", "")
return new_header
def _to_epoch(dt):
return (dt - datetime(1970, 1, 1)).total_seconds()
"""
process_file(in_file, out_dir, prefix):
Processes a raw halo hpl file and turns it into a netcdf
:param in_file:
:param out_dir:
:return:
"""
def writeVAD_to_nc(filename, date, elev, u, v, w, ws, wd, hgt, rmse, r_sq,up_flag,intensity):
    # Write (or append) one VAD wind profile to a daily netCDF file.
    #
    # filename  : output netCDF path; appended to if it already exists
    # date      : profile time (naive datetime; the epoch math below implies UTC)
    # elev      : scan elevation angle, stored as a global attribute
    # u, v, w   : wind components per height bin
    # ws, wd    : wind speed / direction per height bin
    # hgt       : beam heights per bin
    # rmse, r_sq: fit RMS error / homogeneity per bin
    # up_flag   : deployment status flag (scalar per profile)
    # intensity : backscatter intensity per bin
    #
    # NOTE(review): the call site in this file passes only 12 positional
    # arguments (no intensity), which would raise a TypeError against this
    # 13-parameter signature -- confirm which side is stale.
    if os.path.exists(filename):
        # open the netcdf
        nc = netCDF4.Dataset(filename, 'a', format="NETCDF4")
        dim = nc.dimensions['t'].size
        u_var = nc.variables['u']
        v_var = nc.variables['v']
        w_var = nc.variables['w']
        ws_var = nc.variables['ws']
        wd_var = nc.variables['wd']
        rms_var = nc.variables['rms']
        r_sq_var = nc.variables['r_sq']
        time_var = nc.variables['time']
        hgt_var = nc.variables['hgt']
        up_flag_var = nc.variables['up_flag']
        # NOTE(review): these writes grow the unlimited 't' dimension by one;
        # `dim` is then recomputed below and the same fields are written again
        # at the next index, so each append appears to produce two time
        # entries. Also `intensity_var` is never fetched in this branch but is
        # assigned in the shared block below (NameError on append). Verify.
        u_var[dim, :] = u
        v_var[dim, :] = v
        w_var[dim, :] = w
        ws_var[dim, :] = ws
        wd_var[dim, :] = wd
        rms_var[dim, :] = rmse
        r_sq_var[dim, :] = r_sq
        up_flag_var[dim] = up_flag
    else:
        # Create the netcdf
        nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
        # Create the height dimension
        nc.createDimension('height', len(hgt))
        nc.createDimension('t', None)
        # Add the attributes
        nc.setncattr("elev", elev)
        nc.setncattr("date", date.isoformat())
        # Create the variables
        u_var = nc.createVariable('u', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        v_var = nc.createVariable('v', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        w_var = nc.createVariable('w', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        ws_var = nc.createVariable('ws', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        wd_var = nc.createVariable('wd', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        hgt_var = nc.createVariable('hgt', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        rms_var = nc.createVariable('rms', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        r_sq_var = nc.createVariable('r_sq', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
        # NOTE(review): 'intensity' is created on a 'hgt' dimension, but this
        # file only defines 'height' -- confirm this does not fail at create
        # time.
        intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
        time_var = nc.createVariable('time', 'i8', ('t'))
        time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
    # Shared writes for both branches; NaNs are replaced with FILL_VALUE.
    dim = nc.dimensions['t'].size
    u_var[dim, :] = np.where(np.isnan(u), FILL_VALUE, u)
    v_var[dim, :] = np.where(np.isnan(v), FILL_VALUE, v)
    w_var[dim, :] = np.where(np.isnan(w), FILL_VALUE, w)
    ws_var[dim, :] = np.where(np.isnan(ws), FILL_VALUE, ws)
    wd_var[dim, :] = np.where(np.isnan(wd), FILL_VALUE, wd)
    hgt_var[dim, :] = np.where(np.isnan(hgt), FILL_VALUE, hgt)
    rms_var[dim, :] = np.where(np.isnan(rmse), FILL_VALUE, rmse)
    r_sq_var[dim, :] = np.where(np.isnan(r_sq), FILL_VALUE, r_sq)
    time_var[dim] = (date - datetime(1970, 1, 1)).total_seconds()
    up_flag_var[dim]=up_flag
    intensity_var[dim] = intensity
    # Close the netcdf
    nc.close()
def writeSTARE_to_nc(filename, date, w, hgt, intensity):
    # Write a full vertical-stare file. Always created from scratch
    # (mode 'w'): the whole stare file is reprocessed every run.
    #
    # filename : output netCDF path (overwritten if present)
    # date     : sequence of datetimes, one per ray
    # w        : vertical velocity, shape (time, height)
    # hgt      : heights for the 'hgt' dimension
    # intensity: backscatter intensity
    #
    # NOTE(review): the call site in this file passes
    # (times, vel.transpose(), rng, up_flag), so `intensity` actually
    # receives up_flag -- confirm the intended argument order before
    # trusting the stored 'intensity' variable.
    logging.debug(filename)
    logging.debug(date)
    # Create the netcdf
    nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
    # Create the height dimension
    nc.createDimension('hgt', len(hgt))
    nc.createDimension('t', None)
    # Add the attributes
    nc.setncattr("date", date[0].isoformat())
    # Create the variables
    w_var = nc.createVariable('w', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
    hgt_var = nc.createVariable('hgt', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
    intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
    time_var = nc.createVariable('time', 'i8', ('t'))
    time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
    hgt_var[:] = hgt
    # Times are stored as seconds since the Unix epoch.
    time_var[:] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
    w_var[:, :] = w
    intensity_var[:] = intensity
    # Close the netcdf
    nc.close()
def writeRHI_to_nc(filename, date, vel, rng, elev, az, intensity,up_flag):
    # Write (or append) an RHI scan to an hourly netCDF file.
    #
    # filename : output path; appended to if it already exists
    # date     : sequence of datetimes, one per ray
    # vel      : radial velocity, shape (time, height)
    # rng      : gate ranges [m]
    # elev, az : per-ray elevation / azimuth [deg]
    # intensity: backscatter intensity, shape (time, height)
    # up_flag  : deployment status flag, replicated across the appended rays
    if os.path.exists(filename):
        # Append: reopen and fetch the existing variables; the actual
        # writes happen in the shared block below.
        nc = netCDF4.Dataset(filename, 'r+', format="NETCDF4")
        dim = nc.dimensions['t'].size
        vel_var = nc.variables['velocity']
        rng_var = nc.variables['range']
        elev_var = nc.variables['elevation']
        az_var = nc.variables['azimuth']
        intensity_var = nc.variables['intensity']
        time_var = nc.variables['time']
        up_flag_var = nc.variables['up_flag']
        # vel_var[dim, :] = vel
        # rng_var[:] = rng
        # elev_var[dim] = elev
        # az_var[dim] = az
        # intensity_var[dim, :] = intensity
    else:
        # Create the netcdf
        nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
        # Create the height dimension
        nc.createDimension('height', len(rng))
        nc.createDimension('t', None)
        # Create the variables
        vel_var = nc.createVariable('velocity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        rng_var = nc.createVariable('range', 'f8', ('height'), fill_value=FILL_VALUE)
        elev_var = nc.createVariable('elevation', 'f8', ('t'), fill_value=FILL_VALUE)
        az_var = nc.createVariable('azimuth', 'f8', ('t'), fill_value=FILL_VALUE)
        intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
        up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
        time_var = nc.createVariable('time', 'i8', ('t'))
        time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
    # Shared writes: append len(date) new rays at the end of the unlimited
    # 't' dimension. NaNs are replaced with FILL_VALUE.
    dim = nc.dimensions['t'].size
    dim2 = dim+len(date)
    vel_var[dim:dim2, :] = vel
    # NOTE(review): the full 'range' coordinate is rewritten on every append.
    rng_var[:] = np.where(np.isnan(rng), FILL_VALUE, rng)
    elev_var[dim:dim2] = np.where(np.isnan(elev), FILL_VALUE, elev)
    az_var[dim:dim2] = np.where(np.isnan(az), FILL_VALUE, az)
    intensity_var[dim:dim2, :] = np.where(np.isnan(intensity), FILL_VALUE, intensity)
    time_var[dim:dim2] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
    #print [up_flag for i in range(len(date))]
    up_flag_var[dim:dim2] = [float(up_flag) for i in range(len(date))]
    # Close the netcdf
    nc.close()
#########PROCESS CODE####################
#######deployment logging
# reads config file in TORUS_DL/logs/config.js
# writesout changes/events to deployment log in TORUS_DL/logs/log_MMDDYY.txt
# grab current timestamp for run
now = datetime.utcnow()
#now=datetime(2019,05,17,23,58)
log_time = now.strftime("%m%d%y_%H%M")
today = now.strftime("%Y%m%d") # - commented out for test date below.
# today_l = datetime(2019,05,07)
# today=today_l.strftime("%Y%m%d")
# now = datetime(year=2019, month=5, day=7, hour=16, minute=15)
# open and read in config file info
config = open('/Users/elizabethsmith/TORUS_DL/logs/config.js')
logdata = json.load(config)
config.close()
if logdata["status"]=='up':
print "we're up :)"
up_flag=1
if logdata["status"]=='down':
print "we're down :( "
up_flag=0
# TB - Running into an error here when starting a new log file.
# Added some automation to make your like easier
if os.path.exists('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"]):
# open and read most recent logfile entry
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "r+")
lines = current_logfile.readlines()
prev_status = lines[-6][8:-1] # reading logfile previous status (skipping text)
prev_heading = lines[-5][9:-1] # reading logfile previous heading (skipping text)
prev_lat = lines[-4][5:-1] # reading logfile previous lat (skipping text)
prev_lon = lines[-3][5:-1] # reading logfile previous lat (skipping text)
prev_note = lines[-2][6:-1] # reading logfile previous note (skipping text)
# check if the previous log entry matches the data in the config file..
print logdata["note"], prev_note
if (str(logdata["status"]) != prev_status or str(logdata["heading"]) != prev_heading or
str(logdata["lat"]) != prev_lat or str(logdata["lon"]) != prev_lon or
str(logdata["note"]) != prev_note):
print '**CONFIG FILE HAS BEEN UPDATED!** generating log entry...'
# generate writeout for new log entry based on config file.
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
else:
print "--no config changes"
current_logfile.close()
else:
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "w")
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
# get list of exisiting processed scans
path_proc = '/Users/elizabethsmith/TORUS_DL/data/nonQA_proc/dl/2019/201905/' + today + '/'
# check to make sure the output dir exists
try:
os.makedirs(path_proc)
except OSError:
logging.debug("Output path already exists...")
# Check to make sure the processed_files.txt exists
if not os.path.exists(path_proc + 'processed_files.txt'):
os.system('touch {}'.format(path_proc + 'processed_files.txt'))
# Open the processed files list and read it in
proc_list = open(path_proc + 'processed_files.txt', "r+")
proc_files = proc_list.readlines()
proc_list.close()
# Be sure to add the files that are processed to the running list
# get list of existing raw scans - always do the stare
# TB - I changed some things here so only the scans from the current hour are even looked at.
# - This cuts down on processing for the stare files!
path_raw = now.strftime('/Users/elizabethsmith/TORUS_DL/data/raw/dl/%Y/%Y%m/%Y%m%d/*%Y%m%d_*.hpl')
#print path_raw
raw_files = [f for f in glob(path_raw)]
#print raw_files
raw_files=sorted(raw_files)
# Process the scans
for in_file in raw_files:
# TB - I changed your logic for finding the files to process. This is a little easier and less prone to bugs
# Check to see if the file is in the alreasy processed files. If it is, skip it.
if in_file+'\n' in proc_files:
logging.debug("{} already processed".format(in_file))
continue
else:
logging.info("Processing {}".format(in_file))
# read in new scan
out_dir = path_proc
prefix = 'nonQA'
# Read in the text file
lines = []
with open(in_file) as f:
for line in f:
lines.append(line)
logging.debug("Decoding header")
# Read in the header info
header = decode_header(lines[0:11])
ngates = int(header['Number of gates'])
# nrays = int(header['No. of rays in file']) # Cant do this apparently. Not always correct (wtf)
len_data = len(lines[17:])
nrays = len_data / (ngates + 1)
gate_length = float(header['Range gate length (m)'])
start_time = datetime.strptime(header['Start time'], '%Y%m%d %H:%M:%S.%f')
scan_type = None
logging.debug("Reading data")
# Read in the actual data
az = np.zeros(nrays)
hour = np.zeros(nrays)
elev = np.zeros(nrays)
pitch = np.zeros(nrays)
roll = np.zeros(nrays)
rng = np.asarray([(gate + .5) * gate_length for gate in range(ngates)])
vel = np.zeros((ngates, nrays))
intensity = np.zeros((ngates, nrays))
beta = np.zeros((ngates, nrays))
try:
for ray in range(nrays):
# Get the scan info
info = lines[ray * (ngates + 1) + 17].split()
hour[ray] = float(info[0])
az[ray] = float(info[1])
elev[ray] = float(info[2])
pitch[ray] = float(info[3])
roll[ray] = float(info[4])
for gate in range(ngates):
data = lines[ray * (ngates + 1) + 17 + gate + 1].split()
vel[gate, ray] = float(data[1])
intensity[gate, ray] = float(data[2])
beta[gate, ray] = float(data[3])
except IndexError:
logging.warning("Something went wrong with the indexing here...")
# correction for some rounding/hysteresis in scanner azimuths... setting all vals==360. to 0.
az[np.where(az == 360.)] = 0.
# dynamic identification of lidar scan type (fp,ppi,rhi)
# TB - I Added the round here. Was getting a fp file ID'd as a rhi file
# - Also had an issue with and RHI file where az[0] was 0.01 and az[2] was 0
try:
if np.round(az[0], 1) == np.round(az[2], 1): # const azimuth could be RHI or stare
if np.round(elev[0], 1) == np.round(elev[2], 1): # const azimuth and constant elev = stare
scan_type = 'fp'
else: # const azimuth and non-constant elev = RHI
scan_type = 'rhi'
elif np.round(elev[0], 1) == np.round(elev[2]): # changing azimuth, const elev = PPI
scan_type = 'ppi'
if scan_type == None:
raise IndexError
logging.info("Scan Type: " + scan_type)
except IndexError:
logging.warning("Something went wrong with scan type IDing...")
if scan_type == 'ppi':
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
hgt = []
u = []
v = []
w = []
rmse = []
r_sq = []
for i, rng in enumerate(rng):
# Get the required stuff for this range ring
cnr = intensity[i, :] # range,azimuth
Vel = vel[i, :] # range,azimuth
Az = az # 8-terms of az
Elev = elev # 8-terms of az
# Filter out the bad values based on CNR - default was 1.015
Az = np.where(cnr <= 1.01, FILL_VALUE, Az)
Vel = np.where(cnr <= 1.01, FILL_VALUE, Vel)
Az = list_to_masked_array(Az, FILL_VALUE)
Vel = list_to_masked_array(Vel, FILL_VALUE)
# Calculate the vad and height for this range ring
tmp_u, tmp_v, tmp_w = calc_vad_3d(Az, Elev, Vel) # grab this to it can point to it!!!
# Calculate the RMSE
N = float(Vel.size)
az_rad = np.deg2rad(Az)
elev_rad = np.deg2rad(Elev)
derived_vr = (np.sin(az_rad) * np.cos(elev_rad) * tmp_u) + \
(np.cos(az_rad) * np.cos(elev_rad) * tmp_v) + \
(np.sin(elev_rad) * tmp_w)
tmp_E = Vel - derived_vr
# Calculate rms error
tmp_RMSE = np.sqrt(1 / N * np.sum(tmp_E ** 2))
tmp_r_sq = calc_homogeneity(Vel, derived_vr)
# Append to the lists for plotting
u.append(tmp_u)
v.append(tmp_v)
w.append(tmp_w)
hgt.append(ray_height(rng, Elev[0]))
rmse.append(tmp_RMSE)
r_sq.append(tmp_r_sq)
vector_wind = rotate(u, v, w, logdata["heading"], 0, 0)
vector_wind = vector_wind.squeeze()
u = vector_wind[:, 0]
v = vector_wind[:, 1]
w = vector_wind[:, 2]
ws = wind_uv_to_spd(u, v)
wd = wind_uv_to_dir(u, v)
writeVAD_to_nc(path_proc + prefix + date.strftime('%Y%m%d') + '_VAD.nc', date, elev, u, v, w, ws, wd, hgt,
rmse, r_sq,up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
if scan_type == 'fp':
# TB - I decided that it is best to just process the entire stare file every time
# instead of try to append to the netcdf. This shouldn't hinder processeing time that much
# since I changed things to only grab things from the same hour you're processing
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
# Filter out the bad values based on CNR
Vel = np.where(intensity <= 1.01, FILL_VALUE, vel)
# Get the rng into a 2d array
rng = np.array([rng for i in range(len(rng))])
logging.debug("Writing stare file")
writeSTARE_to_nc(path_proc+prefix+date.strftime('%Y%m%d_%H_STARE.nc'), times, vel.transpose(), rng, up_flag)
if scan_type=='rhi':
# TB - A quick tip: Don't do an RHI at az=0. It bounces between 0 and 360 and is a pain in the ass to process
# - Just do it at like 1 deg. or even .1
# TB - Note to self - need to do a heading correction on this one.
date = start_time
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
filename = path_proc + prefix + date.strftime('%Y%m%d_%H') + '_RHI.nc'
# break
writeRHI_to_nc(filename, times, vel.transpose(), rng, elev, az, intensity.transpose(), up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
| decode_header | identifier_name |
sender.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/multierr"
)
type appendResponse struct {
// sent gives information if the data was sent or not
sent bool
// appended keeps state of appending new log line to the body
appended bool
}
// metricPair represents information required to send one metric to the Sumo Logic
type metricPair struct {
attributes pcommon.Map
metric pmetric.Metric
}
type sender struct {
logBuffer []plog.LogRecord
metricBuffer []metricPair
config *Config
client *http.Client
filter filter
sources sourceFormats
compressor compressor
prometheusFormatter prometheusFormatter
graphiteFormatter graphiteFormatter
}
const (
logKey string = "log"
// maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil |
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line, returns it and error eventually
func (s *sender) logToText(record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// sendMetrics sends metrics in right format basing on the s.config.MetricFormat
func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
if ar.appended {
// Do not append new line if separator was not appended
if _, err := body.WriteString(line); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
return ar, errs
}
// cleanLogsBuffer zeroes logBuffer
func (s *sender) cleanLogsBuffer() {
s.logBuffer = (s.logBuffer)[:0]
}
// batchLog adds log to the logBuffer and flushes them if logBuffer is full to avoid overflow
// returns list of log records which were not sent successfully
func (s *sender) batchLog(ctx context.Context, log plog.LogRecord, metadata fields) ([]plog.LogRecord, error) {
s.logBuffer = append(s.logBuffer, log)
if s.countLogs() >= maxBufferSize {
dropped, err := s.sendLogs(ctx, metadata)
s.cleanLogsBuffer()
return dropped, err
}
return nil, nil
}
// countLogs returns number of logs in logBuffer
func (s *sender) countLogs() int {
return len(s.logBuffer)
}
// cleanMetricBuffer zeroes metricBuffer
func (s *sender) cleanMetricBuffer() {
s.metricBuffer = (s.metricBuffer)[:0]
}
// batchMetric adds metric to the metricBuffer and flushes them if metricBuffer is full to avoid overflow
// returns list of metric records which were not sent successfully
func (s *sender) batchMetric(ctx context.Context, metric metricPair, metadata fields) ([]metricPair, error) {
s.metricBuffer = append(s.metricBuffer, metric)
if s.countMetrics() >= maxBufferSize {
dropped, err := s.sendMetrics(ctx, metadata)
s.cleanMetricBuffer()
return dropped, err
}
return nil, nil
}
// countMetrics returns number of metrics in metricBuffer
func (s *sender) countMetrics() int {
return len(s.metricBuffer)
}
| {
return err
} | conditional_block |
sender.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/multierr"
)
type appendResponse struct {
// sent gives information if the data was sent or not
sent bool
// appended keeps state of appending new log line to the body
appended bool
}
// metricPair represents information required to send one metric to the Sumo Logic
type metricPair struct {
attributes pcommon.Map
metric pmetric.Metric
}
type sender struct {
logBuffer []plog.LogRecord
metricBuffer []metricPair
config *Config
client *http.Client
filter filter
sources sourceFormats
compressor compressor
prometheusFormatter prometheusFormatter
graphiteFormatter graphiteFormatter
}
const (
logKey string = "log"
// maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil {
return err
}
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line, returns it and error eventually
func (s *sender) | (record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// sendMetrics sends metrics in right format basing on the s.config.MetricFormat
func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
if ar.appended {
// Do not append new line if separator was not appended
if _, err := body.WriteString(line); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
return ar, errs
}
// cleanLogsBuffer zeroes logBuffer
func (s *sender) cleanLogsBuffer() {
s.logBuffer = (s.logBuffer)[:0]
}
// batchLog adds log to the logBuffer and flushes them if logBuffer is full to avoid overflow
// returns list of log records which were not sent successfully
func (s *sender) batchLog(ctx context.Context, log plog.LogRecord, metadata fields) ([]plog.LogRecord, error) {
s.logBuffer = append(s.logBuffer, log)
if s.countLogs() >= maxBufferSize {
dropped, err := s.sendLogs(ctx, metadata)
s.cleanLogsBuffer()
return dropped, err
}
return nil, nil
}
// countLogs returns number of logs in logBuffer
func (s *sender) countLogs() int {
return len(s.logBuffer)
}
// cleanMetricBuffer zeroes metricBuffer
func (s *sender) cleanMetricBuffer() {
s.metricBuffer = (s.metricBuffer)[:0]
}
// batchMetric adds metric to the metricBuffer and flushes them if metricBuffer is full to avoid overflow
// returns list of metric records which were not sent successfully
func (s *sender) batchMetric(ctx context.Context, metric metricPair, metadata fields) ([]metricPair, error) {
s.metricBuffer = append(s.metricBuffer, metric)
if s.countMetrics() >= maxBufferSize {
dropped, err := s.sendMetrics(ctx, metadata)
s.cleanMetricBuffer()
return dropped, err
}
return nil, nil
}
// countMetrics returns number of metrics in metricBuffer
func (s *sender) countMetrics() int {
return len(s.metricBuffer)
}
| logToText | identifier_name |
sender.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/multierr"
)
type appendResponse struct {
// sent gives information if the data was sent or not
sent bool
// appended keeps state of appending new log line to the body
appended bool
}
// metricPair represents information required to send one metric to the Sumo Logic
type metricPair struct {
attributes pcommon.Map
metric pmetric.Metric
}
type sender struct {
logBuffer []plog.LogRecord
metricBuffer []metricPair
config *Config
client *http.Client
filter filter
sources sourceFormats
compressor compressor
prometheusFormatter prometheusFormatter
graphiteFormatter graphiteFormatter
}
const (
logKey string = "log"
// maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil {
return err
}
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line, returns it and error eventually
func (s *sender) logToText(record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
} | func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
if ar.appended {
// Do not append new line if separator was not appended
if _, err := body.WriteString(line); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
return ar, errs
}
// cleanLogsBuffer zeroes logBuffer
func (s *sender) cleanLogsBuffer() {
s.logBuffer = (s.logBuffer)[:0]
}
// batchLog adds log to the logBuffer and flushes them if logBuffer is full to avoid overflow
// returns list of log records which were not sent successfully
func (s *sender) batchLog(ctx context.Context, log plog.LogRecord, metadata fields) ([]plog.LogRecord, error) {
s.logBuffer = append(s.logBuffer, log)
if s.countLogs() >= maxBufferSize {
dropped, err := s.sendLogs(ctx, metadata)
s.cleanLogsBuffer()
return dropped, err
}
return nil, nil
}
// countLogs returns number of logs in logBuffer
func (s *sender) countLogs() int {
return len(s.logBuffer)
}
// cleanMetricBuffer zeroes metricBuffer
func (s *sender) cleanMetricBuffer() {
s.metricBuffer = (s.metricBuffer)[:0]
}
// batchMetric adds metric to the metricBuffer and flushes them if metricBuffer is full to avoid overflow
// returns list of metric records which were not sent successfully
func (s *sender) batchMetric(ctx context.Context, metric metricPair, metadata fields) ([]metricPair, error) {
s.metricBuffer = append(s.metricBuffer, metric)
if s.countMetrics() >= maxBufferSize {
dropped, err := s.sendMetrics(ctx, metadata)
s.cleanMetricBuffer()
return dropped, err
}
return nil, nil
}
// countMetrics returns number of metrics in metricBuffer
func (s *sender) countMetrics() int {
return len(s.metricBuffer)
} |
return droppedRecords, errs
}
// sendMetrics sends metrics in right format basing on the s.config.MetricFormat | random_line_split |
sender.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/multierr"
)
type appendResponse struct {
// sent gives information if the data was sent or not
sent bool
// appended keeps state of appending new log line to the body
appended bool
}
// metricPair represents information required to send one metric to the Sumo Logic
type metricPair struct {
attributes pcommon.Map
metric pmetric.Metric
}
type sender struct {
logBuffer []plog.LogRecord
metricBuffer []metricPair
config *Config
client *http.Client
filter filter
sources sourceFormats
compressor compressor
prometheusFormatter prometheusFormatter
graphiteFormatter graphiteFormatter
}
const (
logKey string = "log"
// maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil {
return err
}
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line, returns it and error eventually
func (s *sender) logToText(record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// sendMetrics sends metrics in right format basing on the s.config.MetricFormat
func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
if ar.appended {
// Do not append new line if separator was not appended
if _, err := body.WriteString(line); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
return ar, errs
}
// cleanLogsBuffer zeroes logBuffer
func (s *sender) cleanLogsBuffer() |
// batchLog adds log to the logBuffer and flushes them if logBuffer is full to avoid overflow
// returns list of log records which were not sent successfully
func (s *sender) batchLog(ctx context.Context, log plog.LogRecord, metadata fields) ([]plog.LogRecord, error) {
s.logBuffer = append(s.logBuffer, log)
if s.countLogs() >= maxBufferSize {
dropped, err := s.sendLogs(ctx, metadata)
s.cleanLogsBuffer()
return dropped, err
}
return nil, nil
}
// countLogs returns number of logs in logBuffer
func (s *sender) countLogs() int {
return len(s.logBuffer)
}
// cleanMetricBuffer zeroes metricBuffer
func (s *sender) cleanMetricBuffer() {
s.metricBuffer = (s.metricBuffer)[:0]
}
// batchMetric adds metric to the metricBuffer and flushes them if metricBuffer is full to avoid overflow
// returns list of metric records which were not sent successfully
func (s *sender) batchMetric(ctx context.Context, metric metricPair, metadata fields) ([]metricPair, error) {
s.metricBuffer = append(s.metricBuffer, metric)
if s.countMetrics() >= maxBufferSize {
dropped, err := s.sendMetrics(ctx, metadata)
s.cleanMetricBuffer()
return dropped, err
}
return nil, nil
}
// countMetrics returns number of metrics in metricBuffer
func (s *sender) countMetrics() int {
return len(s.metricBuffer)
}
| {
s.logBuffer = (s.logBuffer)[:0]
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.