file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
livestream.rs | use std::collections::{HashMap, BTreeMap};
use tokio::sync::mpsc::*;
use std::{thread, fs};
use crate::camera::CameraProvider;
use std::sync::Arc;
use std::cell::RefCell;
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::error::ErrorKind;
use std::io::Write;
use std::sync::mpsc as bchan;
pub type VideoFrame=(Vec<u8>, usize, usize, usize);
use crate::inference_engine::{start_inference_service, InfererHandler};
use crate::time_now;
// 10 Frames as a batch.
pub struct VideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10],
pub infer_timestamps: [usize; 10],
pub infer_results: [usize; 10]
}
pub struct MutableVideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10]
}
pub type VideoBatch=Arc<VideoBatchContent>;
pub type MutableVideoBatch=Box<MutableVideoBatchContent>;
pub enum IncomingMessage{
CameraShot(VideoBatch),
FrameReq(usize, usize),
ClientJoin(Sender<OutcomingMessage>),
ClientQuit(usize),
QueryInfo(usize)
}
pub struct StreamInfo{
pub current_range: (usize, usize),
pub h264_header: Arc<Vec<u8>>
}
pub enum OutcomingMessage{
FrameArrive(Result<VideoBatch, (usize, usize)>),
ClientID(usize),
CurrentInfo(StreamInfo)
}
// A simple single-threaded ring buffer.
pub struct RingBuffer<T: Clone>{
data: Vec<Option<T>>,
size: usize,
start: usize,
end: usize,
offset: usize, // real index of offset
next_index: usize // real index of end
}
impl<T:Clone> RingBuffer<T>{
pub fn new(size: usize)->RingBuffer<T>{
assert!(size>1);
let mut v=Vec::new();
for i in 0..size{
v.push(None);
}
RingBuffer{
data: v,
size,
start: 0,
end: 0,
offset: 0,
next_index: 0
}
}
pub fn info(&self){
println!("<RingBuffer size={}, start={}, end={}, offset={}, next_index={}>", self.size, self.start, self.end, self.offset, self.next_index);
}
pub fn fetch(&mut self, index: usize)->Option<T>{
//println!("fetching frame {} from [{}, {})", index, self.offset, self.next_index);
if index<self.offset || index>=self.next_index{
return None;
}
let mut idx=index-self.offset+self.start;
if idx>=self.size{
idx-=self.size;
}
Some(self.data[idx].as_ref().unwrap().clone())
}
pub fn push(&mut self, value: T){
let index=self.next_index;
self.next_index=index+1;
self.data[self.end]=Some(value);
self.end+=1;
if self.end>=self.size{
self.end-=self.size;
}
if self.end==self.start{ // The ring-buffer is full. Push start ahead.
self.start+=1;
if self.start>=self.size{
self.start-=self.size;
}
self.offset+=1;
}
}
pub fn current_range(&self)->(usize, usize){
(self.offset, self.next_index)
}
pub fn fetch_with_err(&mut self, index: usize)->Result<T, (usize, usize)>{
match self.fetch(index){
Some(x)=>Ok(x),
None=>Err(self.current_range())
}
}
}
pub struct LiveStream{
next_client_id: usize,
clients: BTreeMap<usize, Sender<OutcomingMessage>>,
cached_frames: RingBuffer<VideoBatch>,
channel: (Sender<IncomingMessage>, Receiver<IncomingMessage>),
first_frame: Option<Arc<Vec<u8>>>
}
impl LiveStream{
pub fn | ()->Self{
LiveStream{
next_client_id: 0,
clients: BTreeMap::new(),
cached_frames: RingBuffer::new(20),
channel: channel(5),
first_frame: None
}
}
pub fn get_sender(&self)->Sender<IncomingMessage>{
self.channel.0.clone()
}
pub fn start(mut self, mut camera: Box<CameraProvider>, mut inferer: Box<InfererHandler>, runtime: &mut tokio::runtime::Runtime)->Sender<IncomingMessage>{
let mut sender=self.get_sender();
let ret=sender.clone();
println!("Taking first frame");
//let mut camera=camera.take().unwrap();
self.first_frame=Some(camera.h264_header());
//let mut inferer=inferer.take().unwrap();
// Start camera thread.
std::thread::spawn(move ||{
let mut i:usize=0;
use std::time::Instant;
let mut now = Instant::now();
loop {
//println!("camera {}", i);
i=i+1;
let msg=Box::new({
let mut buffer=Vec::new();
buffer.reserve(640*480*3*10);
let mut timestamps=[0 as usize; 10];
let mut old_size=0;
let mut sizes=[0; 10];
for i in 0..=9{
camera.capture_zerocopy(&mut buffer).unwrap();
timestamps[i]=time_now();
sizes[i]=buffer.len()-old_size;
old_size=buffer.len();
}
MutableVideoBatchContent{data: buffer, sizes, capture_timestamps: timestamps}
});
/*
let mut msg= ({
let mut data: [std::mem::MaybeUninit<Option<(Vec<u8>, usize)>>; 10] = unsafe {
std::mem::MaybeUninit::uninit().assume_init()
};
for elem in &mut data[..] {
unsafe { std::ptr::write(elem.as_mut_ptr(), Some({
let pic=camera.capture().unwrap();
let stamp=time_now();
(pic, stamp)
})); }
}
let batch=unsafe { std::mem::transmute::<_, [Option<(Vec<u8>, usize)>; 10]>(data) };
//let mut file = fs::File::create(&format!("frame-{}.264", i)).unwrap();
//for i in batch.iter(){
// file.write_all(&i.0).unwrap();
//}
batch
});
*/
//println!("sending to inferer");
inferer.send(msg).unwrap();
//println!("sent");
/*
loop {
let ret=sender.try_send(msg);
match ret{
Ok(())=>{
break;
}
Err(TrySendError{kind: ErrorKind::NoCapacity, value:p})=>{
msg=p;
}
Err(TrySendError{kind: ErrorKind::Closed, value:p})=>{
panic!("Closed!");
}
}
}
*/
if i%2==0{
let elapsed = now.elapsed();
let sec = (elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1000_000_000.0);
println!("i={} sec={} FPS={}", i*10, sec, 20.0/sec);
now = Instant::now();
}
//std::thread::sleep(std::time::Duration::new(1, 0));
}
});
// Start tokio coroutine
runtime.spawn (async move {
loop{
let msg=self.channel.1.recv().await.unwrap();
self.handle_message(msg).await;
}
});
return ret;
}
pub async fn handle_message(&mut self, msg: IncomingMessage){
match msg{
IncomingMessage::CameraShot(video)=>{
self.cached_frames.push(video);
//self.cached_frames.info();
}
IncomingMessage::FrameReq(client_id, frame_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::FrameArrive(self.cached_frames.fetch_with_err(frame_id))).await.ok().unwrap();
}
IncomingMessage::ClientJoin(sender)=>{
let id=self.next_client_id;
self.next_client_id+=1;
sender.clone().send(OutcomingMessage::ClientID(id)).await.ok().unwrap();
self.clients.insert(id, sender.clone());
}
IncomingMessage::ClientQuit(client_id)=>{
self.clients.remove(&client_id);
}
IncomingMessage::QueryInfo(client_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::CurrentInfo(StreamInfo{
current_range: self.cached_frames.current_range(),
h264_header: Arc::clone(&self.first_frame.as_ref().unwrap())
})).await.ok().unwrap();
}
}
}
}
pub struct LiveStreamClient{
index: usize,
stream: Sender<IncomingMessage>,
receiver: Receiver<OutcomingMessage>
}
impl LiveStreamClient{
pub async fn connect(stream: Sender<IncomingMessage>)->LiveStreamClient{
let (tx, mut rx)=channel(5);
stream.clone().send(IncomingMessage::ClientJoin(tx)).await.ok().unwrap();
match rx.recv().await.unwrap() {
OutcomingMessage::ClientID(index)=>{
LiveStreamClient{
index,
stream,
receiver: rx
}
}
_=>unreachable!()
}
}
pub async fn stream_info(&mut self)->StreamInfo{
self.stream.clone().send(IncomingMessage::QueryInfo(self.index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::CurrentInfo(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn request_batch(&mut self, index: usize)->Result<VideoBatch, (usize, usize)>{
self.stream.clone().send(IncomingMessage::FrameReq(self.index, index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::FrameArrive(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn destroy(&mut self){
self.stream.clone().send(IncomingMessage::ClientQuit(self.index)).await.ok().unwrap();
}
} | new | identifier_name |
livestream.rs | use std::collections::{HashMap, BTreeMap};
use tokio::sync::mpsc::*;
use std::{thread, fs};
use crate::camera::CameraProvider;
use std::sync::Arc;
use std::cell::RefCell;
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::error::ErrorKind;
use std::io::Write;
use std::sync::mpsc as bchan;
pub type VideoFrame=(Vec<u8>, usize, usize, usize);
use crate::inference_engine::{start_inference_service, InfererHandler};
use crate::time_now;
// 10 Frames as a batch.
pub struct VideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10],
pub infer_timestamps: [usize; 10],
pub infer_results: [usize; 10]
}
pub struct MutableVideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10]
}
pub type VideoBatch=Arc<VideoBatchContent>;
pub type MutableVideoBatch=Box<MutableVideoBatchContent>;
pub enum IncomingMessage{
CameraShot(VideoBatch),
FrameReq(usize, usize),
ClientJoin(Sender<OutcomingMessage>),
ClientQuit(usize),
QueryInfo(usize)
}
pub struct StreamInfo{
pub current_range: (usize, usize),
pub h264_header: Arc<Vec<u8>>
}
pub enum OutcomingMessage{
FrameArrive(Result<VideoBatch, (usize, usize)>),
ClientID(usize),
CurrentInfo(StreamInfo)
}
// A simple single-threaded ring buffer.
pub struct RingBuffer<T: Clone>{
data: Vec<Option<T>>,
size: usize,
start: usize,
end: usize,
offset: usize, // real index of offset
next_index: usize // real index of end
}
impl<T:Clone> RingBuffer<T>{
pub fn new(size: usize)->RingBuffer<T>{
assert!(size>1);
let mut v=Vec::new();
for i in 0..size{
| data: v,
size,
start: 0,
end: 0,
offset: 0,
next_index: 0
}
}
pub fn info(&self){
println!("<RingBuffer size={}, start={}, end={}, offset={}, next_index={}>", self.size, self.start, self.end, self.offset, self.next_index);
}
pub fn fetch(&mut self, index: usize)->Option<T>{
//println!("fetching frame {} from [{}, {})", index, self.offset, self.next_index);
if index<self.offset || index>=self.next_index{
return None;
}
let mut idx=index-self.offset+self.start;
if idx>=self.size{
idx-=self.size;
}
Some(self.data[idx].as_ref().unwrap().clone())
}
pub fn push(&mut self, value: T){
let index=self.next_index;
self.next_index=index+1;
self.data[self.end]=Some(value);
self.end+=1;
if self.end>=self.size{
self.end-=self.size;
}
if self.end==self.start{ // The ring-buffer is full. Push start ahead.
self.start+=1;
if self.start>=self.size{
self.start-=self.size;
}
self.offset+=1;
}
}
pub fn current_range(&self)->(usize, usize){
(self.offset, self.next_index)
}
pub fn fetch_with_err(&mut self, index: usize)->Result<T, (usize, usize)>{
match self.fetch(index){
Some(x)=>Ok(x),
None=>Err(self.current_range())
}
}
}
pub struct LiveStream{
next_client_id: usize,
clients: BTreeMap<usize, Sender<OutcomingMessage>>,
cached_frames: RingBuffer<VideoBatch>,
channel: (Sender<IncomingMessage>, Receiver<IncomingMessage>),
first_frame: Option<Arc<Vec<u8>>>
}
impl LiveStream{
pub fn new()->Self{
LiveStream{
next_client_id: 0,
clients: BTreeMap::new(),
cached_frames: RingBuffer::new(20),
channel: channel(5),
first_frame: None
}
}
pub fn get_sender(&self)->Sender<IncomingMessage>{
self.channel.0.clone()
}
pub fn start(mut self, mut camera: Box<CameraProvider>, mut inferer: Box<InfererHandler>, runtime: &mut tokio::runtime::Runtime)->Sender<IncomingMessage>{
let mut sender=self.get_sender();
let ret=sender.clone();
println!("Taking first frame");
//let mut camera=camera.take().unwrap();
self.first_frame=Some(camera.h264_header());
//let mut inferer=inferer.take().unwrap();
// Start camera thread.
std::thread::spawn(move ||{
let mut i:usize=0;
use std::time::Instant;
let mut now = Instant::now();
loop {
//println!("camera {}", i);
i=i+1;
let msg=Box::new({
let mut buffer=Vec::new();
buffer.reserve(640*480*3*10);
let mut timestamps=[0 as usize; 10];
let mut old_size=0;
let mut sizes=[0; 10];
for i in 0..=9{
camera.capture_zerocopy(&mut buffer).unwrap();
timestamps[i]=time_now();
sizes[i]=buffer.len()-old_size;
old_size=buffer.len();
}
MutableVideoBatchContent{data: buffer, sizes, capture_timestamps: timestamps}
});
/*
let mut msg= ({
let mut data: [std::mem::MaybeUninit<Option<(Vec<u8>, usize)>>; 10] = unsafe {
std::mem::MaybeUninit::uninit().assume_init()
};
for elem in &mut data[..] {
unsafe { std::ptr::write(elem.as_mut_ptr(), Some({
let pic=camera.capture().unwrap();
let stamp=time_now();
(pic, stamp)
})); }
}
let batch=unsafe { std::mem::transmute::<_, [Option<(Vec<u8>, usize)>; 10]>(data) };
//let mut file = fs::File::create(&format!("frame-{}.264", i)).unwrap();
//for i in batch.iter(){
// file.write_all(&i.0).unwrap();
//}
batch
});
*/
//println!("sending to inferer");
inferer.send(msg).unwrap();
//println!("sent");
/*
loop {
let ret=sender.try_send(msg);
match ret{
Ok(())=>{
break;
}
Err(TrySendError{kind: ErrorKind::NoCapacity, value:p})=>{
msg=p;
}
Err(TrySendError{kind: ErrorKind::Closed, value:p})=>{
panic!("Closed!");
}
}
}
*/
if i%2==0{
let elapsed = now.elapsed();
let sec = (elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1000_000_000.0);
println!("i={} sec={} FPS={}", i*10, sec, 20.0/sec);
now = Instant::now();
}
//std::thread::sleep(std::time::Duration::new(1, 0));
}
});
// Start tokio coroutine
runtime.spawn (async move {
loop{
let msg=self.channel.1.recv().await.unwrap();
self.handle_message(msg).await;
}
});
return ret;
}
pub async fn handle_message(&mut self, msg: IncomingMessage){
match msg{
IncomingMessage::CameraShot(video)=>{
self.cached_frames.push(video);
//self.cached_frames.info();
}
IncomingMessage::FrameReq(client_id, frame_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::FrameArrive(self.cached_frames.fetch_with_err(frame_id))).await.ok().unwrap();
}
IncomingMessage::ClientJoin(sender)=>{
let id=self.next_client_id;
self.next_client_id+=1;
sender.clone().send(OutcomingMessage::ClientID(id)).await.ok().unwrap();
self.clients.insert(id, sender.clone());
}
IncomingMessage::ClientQuit(client_id)=>{
self.clients.remove(&client_id);
}
IncomingMessage::QueryInfo(client_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::CurrentInfo(StreamInfo{
current_range: self.cached_frames.current_range(),
h264_header: Arc::clone(&self.first_frame.as_ref().unwrap())
})).await.ok().unwrap();
}
}
}
}
pub struct LiveStreamClient{
index: usize,
stream: Sender<IncomingMessage>,
receiver: Receiver<OutcomingMessage>
}
impl LiveStreamClient{
pub async fn connect(stream: Sender<IncomingMessage>)->LiveStreamClient{
let (tx, mut rx)=channel(5);
stream.clone().send(IncomingMessage::ClientJoin(tx)).await.ok().unwrap();
match rx.recv().await.unwrap() {
OutcomingMessage::ClientID(index)=>{
LiveStreamClient{
index,
stream,
receiver: rx
}
}
_=>unreachable!()
}
}
pub async fn stream_info(&mut self)->StreamInfo{
self.stream.clone().send(IncomingMessage::QueryInfo(self.index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::CurrentInfo(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn request_batch(&mut self, index: usize)->Result<VideoBatch, (usize, usize)>{
self.stream.clone().send(IncomingMessage::FrameReq(self.index, index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::FrameArrive(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn destroy(&mut self){
self.stream.clone().send(IncomingMessage::ClientQuit(self.index)).await.ok().unwrap();
}
} | v.push(None);
}
RingBuffer{
| random_line_split |
create_tumor_dataset.py | import argparse
import dicom
import nibabel as nib
import numpy as np
import os
import pickle
from matplotlib import pyplot as plt
"""
The goal of this code is to loop through all the patients and show their PET images
(plots will only appear if line ~247, with function 'plot_pet_volume' is not commented)
and their respective MTV shapes. See main (line ~253) to see how this happens. It should
have plenty of comments. Finally the code will save a file volumes.pkl with the full volumes
and MTV shapes. This .pkl file will be read by 'parse_volumes_dataset.py' to generate the
final numpy dataset.
"""
def | (pet_image, pixel_shape, pixel_spacing, mask=None, patient="?", mask_name="?"):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
# z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
masked_pet_image = np.ma.masked_array(pet_image, mask)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
i = 0
while i < pet_image.shape[2]:
# show images
fig_num = 0
fig = plt.figure(fig_num)
plt.clf()
plt.pcolormesh(x, y, pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
if mask is not None:
input("Press ENTER to reveal contour. ")
fig = plt.figure(fig_num)
plt.pcolormesh(x, y, masked_pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
c = input("ENTER=continue, Q=quit, M=median, R=repeat, P=previous, N=start over. ")
if c.startswith("q"):
break
elif c.startswith("m"):
i = int(pet_image.shape[2] / 2) - 1
elif c.startswith("r"):
i -= 1
elif c.startswith("p"):
i -= 2
if i < -1:
i = -1
elif c.startswith("n"):
i = -1
i += 1
def plot_pet_medians(pet_image, pixel_spacing, mask, patient="?", mask_name="?",
median=0, fig_num=0):
"""
Plot pet_medians and project mask. median can be 0, 1 or 2
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if median == 2:
x, y = x, y
median_pet_image = pet_image[:, :, int(pet_image.shape[2] / 2)]
projected_mask = mask[:, :, 0]
for i in range(mask.shape[2]):
projected_mask += mask[:, :, i]
elif median == 1:
x, y = y, z
median_pet_image = pet_image[:, int(pet_image.shape[1] / 2), :]
projected_mask = mask[:, 0, :]
for i in range(mask.shape[1]):
projected_mask += mask[:, i, :]
elif median == 0:
x, y = x, z
median_pet_image = pet_image[int(pet_image.shape[0] / 2), :, :]
projected_mask = mask[0, :, :]
for i in range(mask.shape[0]):
projected_mask += mask[i, :, :]
print(median_pet_image.shape)
masked_pet_image = np.ma.masked_array(median_pet_image, projected_mask)
if median == 0 or median == 1:
masked_pet_image = np.rot90(masked_pet_image)
median_pet_image = np.rot90(median_pet_image)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
fig = plt.figure(fig_num)
plt.clf()
ax = fig.add_subplot(121)
ax.pcolormesh(x, y, median_pet_image, vmin=vmin, vmax=vmax, cmap=cmap)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax = fig.add_subplot(122)
ax.pcolormesh(x, y, masked_pet_image, vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
def plot_pet_image(pet_image, yz_slice_pos, xz_slice_pos, xy_slice_pos, pixel_shape,
pixel_spacing, mask=None):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
x = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
pet_image = np.ma.masked_array(pet_image, mask)
# create slices that will be shown
yz_slice = pet_image[yz_slice_pos, :, :]
xz_slice = pet_image[:, xz_slice_pos, :]
xy_slice = pet_image[:, :, xy_slice_pos]
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
yz_slice = np.rot90(yz_slice)
xz_slice = np.fliplr(np.rot90(xz_slice))
# normalize values
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
plt.figure(0)
plt.clf()
plt.subplot(221)
plt.pcolormesh(y, z, yz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.ylabel('z')
plt.subplot(222)
plt.pcolormesh(x, z, xz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('x')
plt.subplot(223)
plt.pcolormesh(x, y, xy_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
plt.subplot(224)
plt.axis([0, 5, 0, 4.5])
plt.axis('off')
plt.text(1, 3, "x: {:.4f}".format(yz_slice_pos * pixel_spacing[0]), fontsize=15)
plt.text(1, 2, "y: {:.4f}".format(xz_slice_pos * pixel_spacing[1]), fontsize=15)
plt.text(1, 1, "z: {:.4f}".format(xy_slice_pos * pixel_spacing[2]), fontsize=15)
return vmin, vmax
def find_centroid(image, discretize=False):
# finds centroid of 2D or 3D image
if len(image.shape) == 2:
w, h = image.shape
cumulative = 0
centroid = [0, 0]
for x in range(w):
for y in range(h):
centroid[0] += image[x, y] * x
centroid[1] += image[x, y] * y
cumulative += image[x, y]
centroid = centroid[0] / cumulative, centroid[1] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
elif len(image.shape) == 3:
w, h, d = image.shape
cumulative = 0
centroid = [0, 0, 0]
for x in range(w):
for y in range(h):
for z in range(d):
centroid[0] += image[x, y, z] * x
centroid[1] += image[x, y, z] * y
centroid[2] += image[x, y, z] * z
cumulative += image[x, y, z]
centroid = centroid[0] / cumulative, centroid[1] / cumulative, centroid[2] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
return None
def get_pet_location(patient, options):
"""
This function holds the exceptional patients: those that have a weird number of PT folders and
have to be specified manually
"""
patient_dictionary = {
"11111774": "FoR_008/Series_001_PT_001",
"11102077": "FoR_005/Series_002_PT_001",
"20100039": "FoR_005/Series_004_PT_001",
"20100052": "FoR_005/Series_004_PT_001",
"20090735": "FoR_002/Series_001_PT_001",
"11112002": "FoR_006/Series_002_PT_001",
"11110941": "FoR_002/Series_001_PT_001",
"20092802": "FoR_006/Series_001_PT_001"
}
if patient not in patient_dictionary:
return None
for op in options:
if op.endswith(patient_dictionary[patient]):
return op
print("Problem found in the dictionary, ignoring patient")
return None
def get_volumes(patient, pet_folder, struct_folders, number, volumes, plot_data=False):
"""
volumes is where the function writes the volumes found
it is a dictionary, where keys are the names of the patients, and each value is a list
where the first element is always the original 3D PET image, and the following are the
contours of the volumes. Every contour is a dict with 4 fields: a mask (3D map of 1s and 0s),
the contour label, a range (the 2 3D position of the opposite corners of the tumor box)
and the folder where the contour was found.
"""
print("--------------------------------------------------------------------------------------")
print("Patient {:02d}: {}".format(number, patient))
# get all dicom image's paths
dicom_images = [pet_folder+"/"+f for f in os.listdir(pet_folder) if f.lower().endswith(".dcm")]
dicom_images.sort()
# get information from dicom header
dicom_info = dicom.read_file(dicom_images[0])
pixel_shape = (int(dicom_info.Rows), int(dicom_info.Columns), int(dicom_info.NumberOfSlices))
pixel_spacing = (float(dicom_info.PixelSpacing[0]), float(dicom_info.PixelSpacing[1]),
float(dicom_info.SliceThickness))
print(" Pixel spacing: {}".format(pixel_spacing))
# create 3D array for pet image
pet_image = np.zeros(pixel_shape, dtype=dicom_info.pixel_array.dtype)
for i, dicom_img in enumerate(dicom_images):
ds = dicom.read_file(dicom_img)
pet_image[:, :, i] = ds.pixel_array
# create contours structure
mtv_variables = []
for struct_folder in struct_folders:
# extract contours labels and index from lvol.txt
lvoltxt_file = struct_folder + "/lvol.txt"
with open(lvoltxt_file) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if ("mtv" in line.lower() and ("cervix" in line.lower() or "tumor" in line.lower()) and
"nodal" not in line.lower() and "nodes" not in line.lower() and
"ring" not in line.lower() and "opt" not in line.lower()):
struct = line.strip().split("|")
mtv_variables.append((int(struct[0]), struct[-1], struct_folder))
# return nothing if no mtv contours were found
if len(mtv_variables) == 0:
return [], volumes, []
# add contours to original image and plot it
prev_folder = None
patient_volumes = [pet_image]
print(" Possible MTV contours:")
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
# read and transform data from nii file
if prev_folder != mtv_folder:
# only read mtv_folder if it has changed
nii_obj = nib.load(mtv_folder + "/lvol.nii")
nii_data = nii_obj.get_data()
volume = np.zeros(nii_data.shape[:3], dtype=int)
for i in range(nii_data.shape[-1]):
volume += nii_data[:, :, :, 0, i] << (8 * i)
volume = np.swapaxes(volume, 0, 1)
volume = np.flip(volume, 2)
print(" * Structures folder: {}".format(mtv_folder.split("/")[-1]))
print(" MTV_index:", mtv_idx)
print(" MTV_label:", mtv_label.split("/")[-1])
prev_folder = mtv_folder
# create 3D matrix with 1s where ROI is and 0s everwhere else
try:
tumor_volume = (np.bitwise_and(volume, 2 ** mtv_idx) > 0) * 1
except TypeError:
print("Error while reading volume for index: {}, label: {}!".format(mtv_idx,
mtv_label))
patient_volumes.append(())
continue
# find bounding box for volume
mask_range = [[pixel_shape[0], pixel_shape[1], pixel_shape[2]], [-1, -1, -1]]
tumor_exists = False
for xx in range(pixel_shape[0]):
for yy in range(pixel_shape[1]):
for zz in range(pixel_shape[2]):
if tumor_volume[xx, yy, zz]:
tumor_exists = True
mask_range[0][0] = min(mask_range[0][0], xx)
mask_range[0][1] = min(mask_range[0][1], yy)
mask_range[0][2] = min(mask_range[0][2], zz)
mask_range[1][0] = max(mask_range[1][0], xx)
mask_range[1][1] = max(mask_range[1][1], yy)
mask_range[1][2] = max(mask_range[1][2], zz)
# continue if the mask is all 0s
if not tumor_exists:
print("Volume not found for index: {}, label: {}!".format(mtv_idx, mtv_label))
patient_volumes.append(())
continue
# Get ROI
current_volume = pet_image[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
current_mask = tumor_volume[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
# Add volumes to patient_volumes
patient_volumes.append((current_mask, mtv_label, mask_range, mtv_folder))
# Plot volumes
if plot_data:
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=0, fig_num=0,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=1, fig_num=1,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=2, fig_num=2,
patient=patient, mask_name=mtv_label.split("/")[-1])
input("press ENTER to continue... ")
plot_pet_volume(current_volume, pixel_shape, pixel_spacing, mask=current_mask,
patient=patient, mask_name=mtv_label.split("/")[-1])
volumes[patient] = patient_volumes
return mtv_variables, volumes, pixel_spacing
def parse_arguments(root_path):
"""Parse arguments in code."""
parser = argparse.ArgumentParser(description="The goal of this code is to loop through all "
"the patients in the folder root_path (default: '{}') and "
"show their PET images and their respective MTV shapes "
"(if the plot argument is toggled). The code also saves a "
"file 'volumes.pkl' with the full volumes and MTV shapes. "
"This .pkl file can then be read by "
"'parse_volumes_dataset.py' to generate the final numpy "
"dataset.".format(root_path))
parser.add_argument('-p', '--plot', default=False, action="store_true",
help="show figures before saving them")
parser.add_argument('-rp', '--root_path', default=None, type=str,
help="root path to search for files (default is '{}')".format(root_path))
parser.add_argument('--patients', default=None, type=str,
help="enter the list of patients that you want to see and save, separated"
"with spaces, and surroud them with ' or \" (i.e. 11111874 or "
"'02092013 11110482')")
return parser.parse_args()
if __name__ == "__main__":
# path for all patients
root_path = "/home/dani/Documents/disease-detection/Cervical Radiomic Images"
args = parse_arguments(root_path)
if args.root_path is not None:
root_path = args.root_path
# get all patients in dataset
patient_folders = sorted(next(os.walk(root_path))[1])
if args.patients is not None:
tmp_patients = []
my_patients = args.patients.split()
for patient in patient_folders:
if patient in my_patients:
tmp_patients.append(patient)
patient_folders = tmp_patients
# create structure to ignore patients that have an unexpected folder structure
ignored_patients = {p: False for p in patient_folders}
num_ignored_patients = 0
# loop to get PET folders (contain dicom images) and all structure folders (contain nii files)
pet_folders = {}
num_pet_folders = 0
struct_folders = {}
num_struct_folders = 0
for patient in patient_folders:
pet_scans_per_patient = 0
path = "{}/{}".format(root_path, patient)
FoR_folders = [f for f in next(os.walk(path))[1] if f.startswith("FoR_")]
for folder in FoR_folders:
FoR_path = "{}/{}".format(path, folder)
PT_folders = [FoR_path + "/" + f for f in next(os.walk(FoR_path))[1] if f.find("PT") > -1]
num_pet_folders += len(PT_folders)
pet_scans_per_patient += len(PT_folders)
if patient not in pet_folders:
pet_folders[patient] = []
pet_folders[patient] += PT_folders
if pet_scans_per_patient != 1:
location = get_pet_location(patient, pet_folders[patient])
if location is not None:
pet_folders[patient] = [location]
pet_scans_per_patient = 1
if pet_scans_per_patient != 1:
num_ignored_patients += 1
if pet_scans_per_patient == 0:
print("Patient {} has {} PET images.\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient))
ignored_patients[patient] = "Too few PET images: {}".format(pet_scans_per_patient)
else:
print("Patient {} has {} PET images in: \n{}\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient, "\n".join(pet_folders[patient])))
ignored_patients[patient] = "Too many PET images: {}".format(pet_scans_per_patient)
else:
path = pet_folders[patient][0]
s_folders = [path + "/" + f for f in next(os.walk(path))[1] if f.startswith("struct")]
num_struct_folders += len(s_folders)
struct_folders[patient] = s_folders
print("{} patient folders found.".format(len(patient_folders)))
print("{} PET folders found.".format(num_pet_folders))
print("{} structures folders found.".format(num_struct_folders))
print("{} patients ignored.".format(num_ignored_patients))
# Get all volumes and save them
plt.ion()
contour_names = set()
i = 0
volumes = {}
all_pixel_spacing = []
for patient in patient_folders:
if ignored_patients[patient]: # skip ignored patients
continue
i += 1
# This function does all the volumes extraction, and also plots the tumors
mtv_variables, volumes, spacing = get_volumes(patient, pet_folders[patient][0],
struct_folders[patient], i, volumes,
plot_data=args.plot)
if len(spacing) == 3:
all_pixel_spacing.append(spacing)
# Track all the names found
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
contour_names.add(mtv_label)
# If no contour is detected, add patient to ignored set
if len(mtv_variables) == 0 or len(volumes[patient]) <= 1:
ignored_patients[patient] = "No valid MTV contour found"
num_ignored_patients += 1
print("Patient", patient, "has no MTV contour. \nThis patient will be ignored!\n")
if patient in volumes:
volumes.pop(patient)
plt.ioff()
if args.plot:
all_pixel_spacing = np.array(all_pixel_spacing)
plt.plot(all_pixel_spacing[:, 0], label="width (x)")
plt.plot(all_pixel_spacing[:, 1], label="lenght (y)")
plt.plot(all_pixel_spacing[:, 2], label="height (z)")
plt.legend()
plt.title("Dimensions 3D pixels PET images")
plt.show()
# Print some statistics and data from the extraction
print("UNIQUE LABELS:")
for c in contour_names:
print(c)
print(" ")
print("DATASET STRUCTURE:")
patients = sorted(volumes.keys())
for i, patient in enumerate(patients):
print("Patient {}: {}".format(i, patient))
contents = [volumes[patient][0]]
prev_folder = None
for info in volumes[patient][1:]:
if len(info) == 0:
continue
contents.append(info)
current_mask, mtv_label, mask_range, mtv_folder = info
if prev_folder != mtv_folder:
print("Folder: {}".format(mtv_folder.split("/")[-1]))
prev_folder = mtv_folder
print(mtv_label, " ", mask_range)
print(" ")
volumes[patient] = contents
print("IGNORED PATIENTS:")
i = 0
for patient in ignored_patients:
if ignored_patients[patient] is False:
continue
print("Patient {}: {}".format(i, patient))
print("Reason: {}".format(ignored_patients[patient]))
i += 1
# Save the volumes
print(" ")
if os.path.isfile('volumes.pkl'):
answer = ""
while len(answer) <= 0 or answer[0].strip().lower() != "y":
print("Continuing will overwrite the existing 'volumes.pkl' file.")
answer = input("Type 'y' to overwrite data or Ctrl-C to abort.\n>> ")
print(" ")
print("Saving data, this may take a few minutes")
with open('volumes.pkl', 'wb') as f:
pickle.dump(volumes, f)
print("Data saved in 'volumes.pkl'.")
| plot_pet_volume | identifier_name |
create_tumor_dataset.py | import argparse
import dicom
import nibabel as nib
import numpy as np
import os
import pickle
from matplotlib import pyplot as plt
"""
The goal of this code is to loop through all the patients and show their PET images
(plots will only appear if line ~247, with function 'plot_pet_volume' is not commented)
and their respective MTV shapes. See main (line ~253) to see how this happens. It should
have plenty of comments. Finally the code will save a file volumes.pkl with the full volumes
and MTV shapes. This .pkl file will be read by 'parse_volumes_dataset.py' to generate the
final numpy dataset.
"""
def plot_pet_volume(pet_image, pixel_shape, pixel_spacing, mask=None, patient="?", mask_name="?"):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
# z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
masked_pet_image = np.ma.masked_array(pet_image, mask)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
i = 0
while i < pet_image.shape[2]:
# show images
fig_num = 0
fig = plt.figure(fig_num)
plt.clf()
plt.pcolormesh(x, y, pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
if mask is not None:
input("Press ENTER to reveal contour. ")
fig = plt.figure(fig_num)
plt.pcolormesh(x, y, masked_pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
c = input("ENTER=continue, Q=quit, M=median, R=repeat, P=previous, N=start over. ")
if c.startswith("q"):
break
elif c.startswith("m"):
i = int(pet_image.shape[2] / 2) - 1
elif c.startswith("r"):
i -= 1
elif c.startswith("p"):
i -= 2
if i < -1:
i = -1
elif c.startswith("n"):
i = -1
i += 1
def plot_pet_medians(pet_image, pixel_spacing, mask, patient="?", mask_name="?",
median=0, fig_num=0):
"""
Plot pet_medians and project mask. median can be 0, 1 or 2
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if median == 2:
x, y = x, y
median_pet_image = pet_image[:, :, int(pet_image.shape[2] / 2)]
projected_mask = mask[:, :, 0]
for i in range(mask.shape[2]):
projected_mask += mask[:, :, i]
elif median == 1:
x, y = y, z
median_pet_image = pet_image[:, int(pet_image.shape[1] / 2), :]
projected_mask = mask[:, 0, :]
for i in range(mask.shape[1]):
projected_mask += mask[:, i, :]
elif median == 0:
x, y = x, z
median_pet_image = pet_image[int(pet_image.shape[0] / 2), :, :]
projected_mask = mask[0, :, :]
for i in range(mask.shape[0]):
projected_mask += mask[i, :, :]
print(median_pet_image.shape)
masked_pet_image = np.ma.masked_array(median_pet_image, projected_mask)
if median == 0 or median == 1:
masked_pet_image = np.rot90(masked_pet_image)
median_pet_image = np.rot90(median_pet_image)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
fig = plt.figure(fig_num)
plt.clf()
ax = fig.add_subplot(121)
ax.pcolormesh(x, y, median_pet_image, vmin=vmin, vmax=vmax, cmap=cmap)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax = fig.add_subplot(122)
ax.pcolormesh(x, y, masked_pet_image, vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
def plot_pet_image(pet_image, yz_slice_pos, xz_slice_pos, xy_slice_pos, pixel_shape,
pixel_spacing, mask=None):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
x = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
pet_image = np.ma.masked_array(pet_image, mask)
# create slices that will be shown
yz_slice = pet_image[yz_slice_pos, :, :]
xz_slice = pet_image[:, xz_slice_pos, :]
xy_slice = pet_image[:, :, xy_slice_pos]
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
yz_slice = np.rot90(yz_slice)
xz_slice = np.fliplr(np.rot90(xz_slice))
# normalize values
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
plt.figure(0)
plt.clf()
plt.subplot(221)
plt.pcolormesh(y, z, yz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.ylabel('z')
plt.subplot(222)
plt.pcolormesh(x, z, xz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('x')
plt.subplot(223)
plt.pcolormesh(x, y, xy_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
plt.subplot(224)
plt.axis([0, 5, 0, 4.5])
plt.axis('off')
plt.text(1, 3, "x: {:.4f}".format(yz_slice_pos * pixel_spacing[0]), fontsize=15)
plt.text(1, 2, "y: {:.4f}".format(xz_slice_pos * pixel_spacing[1]), fontsize=15)
plt.text(1, 1, "z: {:.4f}".format(xy_slice_pos * pixel_spacing[2]), fontsize=15)
return vmin, vmax
def find_centroid(image, discretize=False):
# finds centroid of 2D or 3D image
if len(image.shape) == 2:
w, h = image.shape
cumulative = 0
centroid = [0, 0]
for x in range(w):
for y in range(h):
centroid[0] += image[x, y] * x
centroid[1] += image[x, y] * y
cumulative += image[x, y]
centroid = centroid[0] / cumulative, centroid[1] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
elif len(image.shape) == 3:
w, h, d = image.shape
cumulative = 0
centroid = [0, 0, 0]
for x in range(w):
for y in range(h):
for z in range(d):
centroid[0] += image[x, y, z] * x
centroid[1] += image[x, y, z] * y
centroid[2] += image[x, y, z] * z
cumulative += image[x, y, z]
centroid = centroid[0] / cumulative, centroid[1] / cumulative, centroid[2] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
return None
def get_pet_location(patient, options):
"""
This function holds the exceptional patients: those that have a weird number of PT folders and
have to be specified manually
"""
patient_dictionary = {
"11111774": "FoR_008/Series_001_PT_001",
"11102077": "FoR_005/Series_002_PT_001",
"20100039": "FoR_005/Series_004_PT_001",
"20100052": "FoR_005/Series_004_PT_001",
"20090735": "FoR_002/Series_001_PT_001",
"11112002": "FoR_006/Series_002_PT_001",
"11110941": "FoR_002/Series_001_PT_001",
"20092802": "FoR_006/Series_001_PT_001"
}
if patient not in patient_dictionary:
return None
for op in options:
if op.endswith(patient_dictionary[patient]):
return op
print("Problem found in the dictionary, ignoring patient")
return None
def get_volumes(patient, pet_folder, struct_folders, number, volumes, plot_data=False):
"""
volumes is where the function writes the volumes found
it is a dictionary, where keys are the names of the patients, and each value is a list
where the first element is always the original 3D PET image, and the following are the
contours of the volumes. Every contour is a dict with 4 fields: a mask (3D map of 1s and 0s),
the contour label, a range (the 2 3D position of the opposite corners of the tumor box)
and the folder where the contour was found.
"""
print("--------------------------------------------------------------------------------------")
print("Patient {:02d}: {}".format(number, patient))
# get all dicom image's paths
dicom_images = [pet_folder+"/"+f for f in os.listdir(pet_folder) if f.lower().endswith(".dcm")]
dicom_images.sort()
# get information from dicom header
dicom_info = dicom.read_file(dicom_images[0])
pixel_shape = (int(dicom_info.Rows), int(dicom_info.Columns), int(dicom_info.NumberOfSlices))
pixel_spacing = (float(dicom_info.PixelSpacing[0]), float(dicom_info.PixelSpacing[1]),
float(dicom_info.SliceThickness))
print(" Pixel spacing: {}".format(pixel_spacing))
# create 3D array for pet image
pet_image = np.zeros(pixel_shape, dtype=dicom_info.pixel_array.dtype)
for i, dicom_img in enumerate(dicom_images):
ds = dicom.read_file(dicom_img)
pet_image[:, :, i] = ds.pixel_array
# create contours structure
mtv_variables = []
for struct_folder in struct_folders:
# extract contours labels and index from lvol.txt
lvoltxt_file = struct_folder + "/lvol.txt"
with open(lvoltxt_file) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if ("mtv" in line.lower() and ("cervix" in line.lower() or "tumor" in line.lower()) and
"nodal" not in line.lower() and "nodes" not in line.lower() and
"ring" not in line.lower() and "opt" not in line.lower()):
struct = line.strip().split("|")
mtv_variables.append((int(struct[0]), struct[-1], struct_folder))
# return nothing if no mtv contours were found
if len(mtv_variables) == 0:
return [], volumes, []
# add contours to original image and plot it
prev_folder = None
patient_volumes = [pet_image]
print(" Possible MTV contours:")
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
# read and transform data from nii file
if prev_folder != mtv_folder:
# only read mtv_folder if it has changed
nii_obj = nib.load(mtv_folder + "/lvol.nii")
nii_data = nii_obj.get_data()
volume = np.zeros(nii_data.shape[:3], dtype=int)
for i in range(nii_data.shape[-1]):
volume += nii_data[:, :, :, 0, i] << (8 * i)
volume = np.swapaxes(volume, 0, 1)
volume = np.flip(volume, 2)
print(" * Structures folder: {}".format(mtv_folder.split("/")[-1]))
print(" MTV_index:", mtv_idx)
print(" MTV_label:", mtv_label.split("/")[-1])
prev_folder = mtv_folder
# create 3D matrix with 1s where ROI is and 0s everwhere else
try:
tumor_volume = (np.bitwise_and(volume, 2 ** mtv_idx) > 0) * 1
except TypeError:
print("Error while reading volume for index: {}, label: {}!".format(mtv_idx,
mtv_label))
patient_volumes.append(())
continue
# find bounding box for volume
mask_range = [[pixel_shape[0], pixel_shape[1], pixel_shape[2]], [-1, -1, -1]]
tumor_exists = False
for xx in range(pixel_shape[0]):
for yy in range(pixel_shape[1]):
for zz in range(pixel_shape[2]):
if tumor_volume[xx, yy, zz]:
tumor_exists = True
mask_range[0][0] = min(mask_range[0][0], xx)
mask_range[0][1] = min(mask_range[0][1], yy)
mask_range[0][2] = min(mask_range[0][2], zz)
mask_range[1][0] = max(mask_range[1][0], xx)
mask_range[1][1] = max(mask_range[1][1], yy)
mask_range[1][2] = max(mask_range[1][2], zz)
# continue if the mask is all 0s
if not tumor_exists:
print("Volume not found for index: {}, label: {}!".format(mtv_idx, mtv_label))
patient_volumes.append(())
continue
# Get ROI
current_volume = pet_image[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
current_mask = tumor_volume[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
# Add volumes to patient_volumes
patient_volumes.append((current_mask, mtv_label, mask_range, mtv_folder))
# Plot volumes
if plot_data:
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=0, fig_num=0,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=1, fig_num=1,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=2, fig_num=2,
patient=patient, mask_name=mtv_label.split("/")[-1])
input("press ENTER to continue... ")
plot_pet_volume(current_volume, pixel_shape, pixel_spacing, mask=current_mask,
patient=patient, mask_name=mtv_label.split("/")[-1])
volumes[patient] = patient_volumes
return mtv_variables, volumes, pixel_spacing
def parse_arguments(root_path):
"""Parse arguments in code."""
parser = argparse.ArgumentParser(description="The goal of this code is to loop through all "
"the patients in the folder root_path (default: '{}') and "
"show their PET images and their respective MTV shapes "
"(if the plot argument is toggled). The code also saves a "
"file 'volumes.pkl' with the full volumes and MTV shapes. "
"This .pkl file can then be read by "
"'parse_volumes_dataset.py' to generate the final numpy "
"dataset.".format(root_path))
parser.add_argument('-p', '--plot', default=False, action="store_true",
help="show figures before saving them")
parser.add_argument('-rp', '--root_path', default=None, type=str,
help="root path to search for files (default is '{}')".format(root_path))
parser.add_argument('--patients', default=None, type=str,
help="enter the list of patients that you want to see and save, separated"
"with spaces, and surroud them with ' or \" (i.e. 11111874 or "
"'02092013 11110482')")
return parser.parse_args()
if __name__ == "__main__":
# path for all patients
root_path = "/home/dani/Documents/disease-detection/Cervical Radiomic Images"
args = parse_arguments(root_path)
if args.root_path is not None:
root_path = args.root_path
# get all patients in dataset
patient_folders = sorted(next(os.walk(root_path))[1])
if args.patients is not None:
tmp_patients = []
my_patients = args.patients.split()
for patient in patient_folders:
if patient in my_patients:
tmp_patients.append(patient)
patient_folders = tmp_patients
# create structure to ignore patients that have an unexpected folder structure
ignored_patients = {p: False for p in patient_folders}
num_ignored_patients = 0
# loop to get PET folders (contain dicom images) and all structure folders (contain nii files)
pet_folders = {}
num_pet_folders = 0
struct_folders = {}
num_struct_folders = 0
for patient in patient_folders:
pet_scans_per_patient = 0
path = "{}/{}".format(root_path, patient)
FoR_folders = [f for f in next(os.walk(path))[1] if f.startswith("FoR_")]
for folder in FoR_folders:
FoR_path = "{}/{}".format(path, folder)
PT_folders = [FoR_path + "/" + f for f in next(os.walk(FoR_path))[1] if f.find("PT") > -1]
num_pet_folders += len(PT_folders)
pet_scans_per_patient += len(PT_folders)
if patient not in pet_folders:
pet_folders[patient] = []
pet_folders[patient] += PT_folders
if pet_scans_per_patient != 1:
location = get_pet_location(patient, pet_folders[patient])
if location is not None:
pet_folders[patient] = [location]
pet_scans_per_patient = 1
if pet_scans_per_patient != 1:
num_ignored_patients += 1
if pet_scans_per_patient == 0:
print("Patient {} has {} PET images.\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient))
ignored_patients[patient] = "Too few PET images: {}".format(pet_scans_per_patient)
else:
print("Patient {} has {} PET images in: \n{}\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient, "\n".join(pet_folders[patient])))
ignored_patients[patient] = "Too many PET images: {}".format(pet_scans_per_patient)
else:
path = pet_folders[patient][0]
s_folders = [path + "/" + f for f in next(os.walk(path))[1] if f.startswith("struct")]
num_struct_folders += len(s_folders)
struct_folders[patient] = s_folders
print("{} patient folders found.".format(len(patient_folders)))
print("{} PET folders found.".format(num_pet_folders))
print("{} structures folders found.".format(num_struct_folders))
print("{} patients ignored.".format(num_ignored_patients))
# Get all volumes and save them
plt.ion()
contour_names = set()
i = 0
volumes = {}
all_pixel_spacing = []
for patient in patient_folders:
if ignored_patients[patient]: # skip ignored patients
continue
i += 1
# This function does all the volumes extraction, and also plots the tumors
mtv_variables, volumes, spacing = get_volumes(patient, pet_folders[patient][0],
struct_folders[patient], i, volumes,
plot_data=args.plot)
if len(spacing) == 3:
all_pixel_spacing.append(spacing)
# Track all the names found
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
contour_names.add(mtv_label)
# If no contour is detected, add patient to ignored set
if len(mtv_variables) == 0 or len(volumes[patient]) <= 1:
|
plt.ioff()
if args.plot:
all_pixel_spacing = np.array(all_pixel_spacing)
plt.plot(all_pixel_spacing[:, 0], label="width (x)")
plt.plot(all_pixel_spacing[:, 1], label="lenght (y)")
plt.plot(all_pixel_spacing[:, 2], label="height (z)")
plt.legend()
plt.title("Dimensions 3D pixels PET images")
plt.show()
# Print some statistics and data from the extraction
print("UNIQUE LABELS:")
for c in contour_names:
print(c)
print(" ")
print("DATASET STRUCTURE:")
patients = sorted(volumes.keys())
for i, patient in enumerate(patients):
print("Patient {}: {}".format(i, patient))
contents = [volumes[patient][0]]
prev_folder = None
for info in volumes[patient][1:]:
if len(info) == 0:
continue
contents.append(info)
current_mask, mtv_label, mask_range, mtv_folder = info
if prev_folder != mtv_folder:
print("Folder: {}".format(mtv_folder.split("/")[-1]))
prev_folder = mtv_folder
print(mtv_label, " ", mask_range)
print(" ")
volumes[patient] = contents
print("IGNORED PATIENTS:")
i = 0
for patient in ignored_patients:
if ignored_patients[patient] is False:
continue
print("Patient {}: {}".format(i, patient))
print("Reason: {}".format(ignored_patients[patient]))
i += 1
# Save the volumes
print(" ")
if os.path.isfile('volumes.pkl'):
answer = ""
while len(answer) <= 0 or answer[0].strip().lower() != "y":
print("Continuing will overwrite the existing 'volumes.pkl' file.")
answer = input("Type 'y' to overwrite data or Ctrl-C to abort.\n>> ")
print(" ")
print("Saving data, this may take a few minutes")
with open('volumes.pkl', 'wb') as f:
pickle.dump(volumes, f)
print("Data saved in 'volumes.pkl'.")
| ignored_patients[patient] = "No valid MTV contour found"
num_ignored_patients += 1
print("Patient", patient, "has no MTV contour. \nThis patient will be ignored!\n")
if patient in volumes:
volumes.pop(patient) | conditional_block |
create_tumor_dataset.py | import argparse
import dicom
import nibabel as nib
import numpy as np
import os
import pickle
from matplotlib import pyplot as plt
"""
The goal of this code is to loop through all the patients and show their PET images
(plots will only appear if line ~247, with function 'plot_pet_volume' is not commented)
and their respective MTV shapes. See main (line ~253) to see how this happens. It should
have plenty of comments. Finally the code will save a file volumes.pkl with the full volumes
and MTV shapes. This .pkl file will be read by 'parse_volumes_dataset.py' to generate the
final numpy dataset.
"""
def plot_pet_volume(pet_image, pixel_shape, pixel_spacing, mask=None, patient="?", mask_name="?"):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
# z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
masked_pet_image = np.ma.masked_array(pet_image, mask)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
i = 0
while i < pet_image.shape[2]:
# show images
fig_num = 0
fig = plt.figure(fig_num)
plt.clf()
plt.pcolormesh(x, y, pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
if mask is not None:
input("Press ENTER to reveal contour. ")
fig = plt.figure(fig_num)
plt.pcolormesh(x, y, masked_pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
c = input("ENTER=continue, Q=quit, M=median, R=repeat, P=previous, N=start over. ")
if c.startswith("q"):
break
elif c.startswith("m"):
i = int(pet_image.shape[2] / 2) - 1
elif c.startswith("r"):
i -= 1
elif c.startswith("p"):
i -= 2
if i < -1:
i = -1
elif c.startswith("n"):
i = -1
i += 1
def plot_pet_medians(pet_image, pixel_spacing, mask, patient="?", mask_name="?",
median=0, fig_num=0):
|
def plot_pet_image(pet_image, yz_slice_pos, xz_slice_pos, xy_slice_pos, pixel_shape,
pixel_spacing, mask=None):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
x = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
pet_image = np.ma.masked_array(pet_image, mask)
# create slices that will be shown
yz_slice = pet_image[yz_slice_pos, :, :]
xz_slice = pet_image[:, xz_slice_pos, :]
xy_slice = pet_image[:, :, xy_slice_pos]
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
yz_slice = np.rot90(yz_slice)
xz_slice = np.fliplr(np.rot90(xz_slice))
# normalize values
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
plt.figure(0)
plt.clf()
plt.subplot(221)
plt.pcolormesh(y, z, yz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.ylabel('z')
plt.subplot(222)
plt.pcolormesh(x, z, xz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('x')
plt.subplot(223)
plt.pcolormesh(x, y, xy_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
plt.subplot(224)
plt.axis([0, 5, 0, 4.5])
plt.axis('off')
plt.text(1, 3, "x: {:.4f}".format(yz_slice_pos * pixel_spacing[0]), fontsize=15)
plt.text(1, 2, "y: {:.4f}".format(xz_slice_pos * pixel_spacing[1]), fontsize=15)
plt.text(1, 1, "z: {:.4f}".format(xy_slice_pos * pixel_spacing[2]), fontsize=15)
return vmin, vmax
def find_centroid(image, discretize=False):
# finds centroid of 2D or 3D image
if len(image.shape) == 2:
w, h = image.shape
cumulative = 0
centroid = [0, 0]
for x in range(w):
for y in range(h):
centroid[0] += image[x, y] * x
centroid[1] += image[x, y] * y
cumulative += image[x, y]
centroid = centroid[0] / cumulative, centroid[1] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
elif len(image.shape) == 3:
w, h, d = image.shape
cumulative = 0
centroid = [0, 0, 0]
for x in range(w):
for y in range(h):
for z in range(d):
centroid[0] += image[x, y, z] * x
centroid[1] += image[x, y, z] * y
centroid[2] += image[x, y, z] * z
cumulative += image[x, y, z]
centroid = centroid[0] / cumulative, centroid[1] / cumulative, centroid[2] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
return None
def get_pet_location(patient, options):
"""
This function holds the exceptional patients: those that have a weird number of PT folders and
have to be specified manually
"""
patient_dictionary = {
"11111774": "FoR_008/Series_001_PT_001",
"11102077": "FoR_005/Series_002_PT_001",
"20100039": "FoR_005/Series_004_PT_001",
"20100052": "FoR_005/Series_004_PT_001",
"20090735": "FoR_002/Series_001_PT_001",
"11112002": "FoR_006/Series_002_PT_001",
"11110941": "FoR_002/Series_001_PT_001",
"20092802": "FoR_006/Series_001_PT_001"
}
if patient not in patient_dictionary:
return None
for op in options:
if op.endswith(patient_dictionary[patient]):
return op
print("Problem found in the dictionary, ignoring patient")
return None
def get_volumes(patient, pet_folder, struct_folders, number, volumes, plot_data=False):
"""
volumes is where the function writes the volumes found
it is a dictionary, where keys are the names of the patients, and each value is a list
where the first element is always the original 3D PET image, and the following are the
contours of the volumes. Every contour is a dict with 4 fields: a mask (3D map of 1s and 0s),
the contour label, a range (the 2 3D position of the opposite corners of the tumor box)
and the folder where the contour was found.
"""
print("--------------------------------------------------------------------------------------")
print("Patient {:02d}: {}".format(number, patient))
# get all dicom image's paths
dicom_images = [pet_folder+"/"+f for f in os.listdir(pet_folder) if f.lower().endswith(".dcm")]
dicom_images.sort()
# get information from dicom header
dicom_info = dicom.read_file(dicom_images[0])
pixel_shape = (int(dicom_info.Rows), int(dicom_info.Columns), int(dicom_info.NumberOfSlices))
pixel_spacing = (float(dicom_info.PixelSpacing[0]), float(dicom_info.PixelSpacing[1]),
float(dicom_info.SliceThickness))
print(" Pixel spacing: {}".format(pixel_spacing))
# create 3D array for pet image
pet_image = np.zeros(pixel_shape, dtype=dicom_info.pixel_array.dtype)
for i, dicom_img in enumerate(dicom_images):
ds = dicom.read_file(dicom_img)
pet_image[:, :, i] = ds.pixel_array
# create contours structure
mtv_variables = []
for struct_folder in struct_folders:
# extract contours labels and index from lvol.txt
lvoltxt_file = struct_folder + "/lvol.txt"
with open(lvoltxt_file) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if ("mtv" in line.lower() and ("cervix" in line.lower() or "tumor" in line.lower()) and
"nodal" not in line.lower() and "nodes" not in line.lower() and
"ring" not in line.lower() and "opt" not in line.lower()):
struct = line.strip().split("|")
mtv_variables.append((int(struct[0]), struct[-1], struct_folder))
# return nothing if no mtv contours were found
if len(mtv_variables) == 0:
return [], volumes, []
# add contours to original image and plot it
prev_folder = None
patient_volumes = [pet_image]
print(" Possible MTV contours:")
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
# read and transform data from nii file
if prev_folder != mtv_folder:
# only read mtv_folder if it has changed
nii_obj = nib.load(mtv_folder + "/lvol.nii")
nii_data = nii_obj.get_data()
volume = np.zeros(nii_data.shape[:3], dtype=int)
for i in range(nii_data.shape[-1]):
volume += nii_data[:, :, :, 0, i] << (8 * i)
volume = np.swapaxes(volume, 0, 1)
volume = np.flip(volume, 2)
print(" * Structures folder: {}".format(mtv_folder.split("/")[-1]))
print(" MTV_index:", mtv_idx)
print(" MTV_label:", mtv_label.split("/")[-1])
prev_folder = mtv_folder
# create 3D matrix with 1s where ROI is and 0s everwhere else
try:
tumor_volume = (np.bitwise_and(volume, 2 ** mtv_idx) > 0) * 1
except TypeError:
print("Error while reading volume for index: {}, label: {}!".format(mtv_idx,
mtv_label))
patient_volumes.append(())
continue
# find bounding box for volume
mask_range = [[pixel_shape[0], pixel_shape[1], pixel_shape[2]], [-1, -1, -1]]
tumor_exists = False
for xx in range(pixel_shape[0]):
for yy in range(pixel_shape[1]):
for zz in range(pixel_shape[2]):
if tumor_volume[xx, yy, zz]:
tumor_exists = True
mask_range[0][0] = min(mask_range[0][0], xx)
mask_range[0][1] = min(mask_range[0][1], yy)
mask_range[0][2] = min(mask_range[0][2], zz)
mask_range[1][0] = max(mask_range[1][0], xx)
mask_range[1][1] = max(mask_range[1][1], yy)
mask_range[1][2] = max(mask_range[1][2], zz)
# continue if the mask is all 0s
if not tumor_exists:
print("Volume not found for index: {}, label: {}!".format(mtv_idx, mtv_label))
patient_volumes.append(())
continue
# Get ROI
current_volume = pet_image[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
current_mask = tumor_volume[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
# Add volumes to patient_volumes
patient_volumes.append((current_mask, mtv_label, mask_range, mtv_folder))
# Plot volumes
if plot_data:
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=0, fig_num=0,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=1, fig_num=1,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=2, fig_num=2,
patient=patient, mask_name=mtv_label.split("/")[-1])
input("press ENTER to continue... ")
plot_pet_volume(current_volume, pixel_shape, pixel_spacing, mask=current_mask,
patient=patient, mask_name=mtv_label.split("/")[-1])
volumes[patient] = patient_volumes
return mtv_variables, volumes, pixel_spacing
def parse_arguments(root_path):
"""Parse arguments in code."""
parser = argparse.ArgumentParser(description="The goal of this code is to loop through all "
"the patients in the folder root_path (default: '{}') and "
"show their PET images and their respective MTV shapes "
"(if the plot argument is toggled). The code also saves a "
"file 'volumes.pkl' with the full volumes and MTV shapes. "
"This .pkl file can then be read by "
"'parse_volumes_dataset.py' to generate the final numpy "
"dataset.".format(root_path))
parser.add_argument('-p', '--plot', default=False, action="store_true",
help="show figures before saving them")
parser.add_argument('-rp', '--root_path', default=None, type=str,
help="root path to search for files (default is '{}')".format(root_path))
parser.add_argument('--patients', default=None, type=str,
help="enter the list of patients that you want to see and save, separated"
"with spaces, and surroud them with ' or \" (i.e. 11111874 or "
"'02092013 11110482')")
return parser.parse_args()
if __name__ == "__main__":
# path for all patients
root_path = "/home/dani/Documents/disease-detection/Cervical Radiomic Images"
args = parse_arguments(root_path)
if args.root_path is not None:
root_path = args.root_path
# get all patients in dataset
patient_folders = sorted(next(os.walk(root_path))[1])
if args.patients is not None:
tmp_patients = []
my_patients = args.patients.split()
for patient in patient_folders:
if patient in my_patients:
tmp_patients.append(patient)
patient_folders = tmp_patients
# create structure to ignore patients that have an unexpected folder structure
ignored_patients = {p: False for p in patient_folders}
num_ignored_patients = 0
# loop to get PET folders (contain dicom images) and all structure folders (contain nii files)
pet_folders = {}
num_pet_folders = 0
struct_folders = {}
num_struct_folders = 0
for patient in patient_folders:
pet_scans_per_patient = 0
path = "{}/{}".format(root_path, patient)
FoR_folders = [f for f in next(os.walk(path))[1] if f.startswith("FoR_")]
for folder in FoR_folders:
FoR_path = "{}/{}".format(path, folder)
PT_folders = [FoR_path + "/" + f for f in next(os.walk(FoR_path))[1] if f.find("PT") > -1]
num_pet_folders += len(PT_folders)
pet_scans_per_patient += len(PT_folders)
if patient not in pet_folders:
pet_folders[patient] = []
pet_folders[patient] += PT_folders
if pet_scans_per_patient != 1:
location = get_pet_location(patient, pet_folders[patient])
if location is not None:
pet_folders[patient] = [location]
pet_scans_per_patient = 1
if pet_scans_per_patient != 1:
num_ignored_patients += 1
if pet_scans_per_patient == 0:
print("Patient {} has {} PET images.\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient))
ignored_patients[patient] = "Too few PET images: {}".format(pet_scans_per_patient)
else:
print("Patient {} has {} PET images in: \n{}\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient, "\n".join(pet_folders[patient])))
ignored_patients[patient] = "Too many PET images: {}".format(pet_scans_per_patient)
else:
path = pet_folders[patient][0]
s_folders = [path + "/" + f for f in next(os.walk(path))[1] if f.startswith("struct")]
num_struct_folders += len(s_folders)
struct_folders[patient] = s_folders
print("{} patient folders found.".format(len(patient_folders)))
print("{} PET folders found.".format(num_pet_folders))
print("{} structures folders found.".format(num_struct_folders))
print("{} patients ignored.".format(num_ignored_patients))
# Get all volumes and save them
plt.ion()
contour_names = set()
i = 0
volumes = {}
all_pixel_spacing = []
for patient in patient_folders:
if ignored_patients[patient]: # skip ignored patients
continue
i += 1
# This function does all the volumes extraction, and also plots the tumors
mtv_variables, volumes, spacing = get_volumes(patient, pet_folders[patient][0],
struct_folders[patient], i, volumes,
plot_data=args.plot)
if len(spacing) == 3:
all_pixel_spacing.append(spacing)
# Track all the names found
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
contour_names.add(mtv_label)
# If no contour is detected, add patient to ignored set
if len(mtv_variables) == 0 or len(volumes[patient]) <= 1:
ignored_patients[patient] = "No valid MTV contour found"
num_ignored_patients += 1
print("Patient", patient, "has no MTV contour. \nThis patient will be ignored!\n")
if patient in volumes:
volumes.pop(patient)
plt.ioff()
if args.plot:
all_pixel_spacing = np.array(all_pixel_spacing)
plt.plot(all_pixel_spacing[:, 0], label="width (x)")
plt.plot(all_pixel_spacing[:, 1], label="lenght (y)")
plt.plot(all_pixel_spacing[:, 2], label="height (z)")
plt.legend()
plt.title("Dimensions 3D pixels PET images")
plt.show()
# Print some statistics and data from the extraction
print("UNIQUE LABELS:")
for c in contour_names:
print(c)
print(" ")
print("DATASET STRUCTURE:")
patients = sorted(volumes.keys())
for i, patient in enumerate(patients):
print("Patient {}: {}".format(i, patient))
contents = [volumes[patient][0]]
prev_folder = None
for info in volumes[patient][1:]:
if len(info) == 0:
continue
contents.append(info)
current_mask, mtv_label, mask_range, mtv_folder = info
if prev_folder != mtv_folder:
print("Folder: {}".format(mtv_folder.split("/")[-1]))
prev_folder = mtv_folder
print(mtv_label, " ", mask_range)
print(" ")
volumes[patient] = contents
print("IGNORED PATIENTS:")
i = 0
for patient in ignored_patients:
if ignored_patients[patient] is False:
continue
print("Patient {}: {}".format(i, patient))
print("Reason: {}".format(ignored_patients[patient]))
i += 1
# Save the volumes
print(" ")
if os.path.isfile('volumes.pkl'):
answer = ""
while len(answer) <= 0 or answer[0].strip().lower() != "y":
print("Continuing will overwrite the existing 'volumes.pkl' file.")
answer = input("Type 'y' to overwrite data or Ctrl-C to abort.\n>> ")
print(" ")
print("Saving data, this may take a few minutes")
with open('volumes.pkl', 'wb') as f:
pickle.dump(volumes, f)
print("Data saved in 'volumes.pkl'.")
| """
Plot pet_medians and project mask. median can be 0, 1 or 2
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if median == 2:
x, y = x, y
median_pet_image = pet_image[:, :, int(pet_image.shape[2] / 2)]
projected_mask = mask[:, :, 0]
for i in range(mask.shape[2]):
projected_mask += mask[:, :, i]
elif median == 1:
x, y = y, z
median_pet_image = pet_image[:, int(pet_image.shape[1] / 2), :]
projected_mask = mask[:, 0, :]
for i in range(mask.shape[1]):
projected_mask += mask[:, i, :]
elif median == 0:
x, y = x, z
median_pet_image = pet_image[int(pet_image.shape[0] / 2), :, :]
projected_mask = mask[0, :, :]
for i in range(mask.shape[0]):
projected_mask += mask[i, :, :]
print(median_pet_image.shape)
masked_pet_image = np.ma.masked_array(median_pet_image, projected_mask)
if median == 0 or median == 1:
masked_pet_image = np.rot90(masked_pet_image)
median_pet_image = np.rot90(median_pet_image)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
fig = plt.figure(fig_num)
plt.clf()
ax = fig.add_subplot(121)
ax.pcolormesh(x, y, median_pet_image, vmin=vmin, vmax=vmax, cmap=cmap)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax = fig.add_subplot(122)
ax.pcolormesh(x, y, masked_pet_image, vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title)) | identifier_body |
create_tumor_dataset.py | import argparse
import dicom
import nibabel as nib
import numpy as np
import os
import pickle
from matplotlib import pyplot as plt
"""
The goal of this code is to loop through all the patients and show their PET images
(plots will only appear if line ~247, with function 'plot_pet_volume' is not commented)
and their respective MTV shapes. See main (line ~253) to see how this happens. It should
have plenty of comments. Finally the code will save a file volumes.pkl with the full volumes
and MTV shapes. This .pkl file will be read by 'parse_volumes_dataset.py' to generate the
final numpy dataset.
"""
def plot_pet_volume(pet_image, pixel_shape, pixel_spacing, mask=None, patient="?", mask_name="?"):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
# z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
masked_pet_image = np.ma.masked_array(pet_image, mask)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
i = 0
while i < pet_image.shape[2]:
# show images
fig_num = 0
fig = plt.figure(fig_num)
plt.clf()
plt.pcolormesh(x, y, pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
if mask is not None:
input("Press ENTER to reveal contour. ")
fig = plt.figure(fig_num)
plt.pcolormesh(x, y, masked_pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
c = input("ENTER=continue, Q=quit, M=median, R=repeat, P=previous, N=start over. ")
if c.startswith("q"):
break
elif c.startswith("m"):
i = int(pet_image.shape[2] / 2) - 1
elif c.startswith("r"):
i -= 1
elif c.startswith("p"):
i -= 2
if i < -1:
i = -1
elif c.startswith("n"):
i = -1
i += 1
def plot_pet_medians(pet_image, pixel_spacing, mask, patient="?", mask_name="?",
median=0, fig_num=0):
"""
Plot pet_medians and project mask. median can be 0, 1 or 2
"""
# create axis for plotting
pixel_shape = pet_image.shape
x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if median == 2:
x, y = x, y
median_pet_image = pet_image[:, :, int(pet_image.shape[2] / 2)]
projected_mask = mask[:, :, 0]
for i in range(mask.shape[2]):
projected_mask += mask[:, :, i]
elif median == 1:
x, y = y, z
median_pet_image = pet_image[:, int(pet_image.shape[1] / 2), :]
projected_mask = mask[:, 0, :]
for i in range(mask.shape[1]):
projected_mask += mask[:, i, :]
elif median == 0:
x, y = x, z
median_pet_image = pet_image[int(pet_image.shape[0] / 2), :, :]
projected_mask = mask[0, :, :]
for i in range(mask.shape[0]):
projected_mask += mask[i, :, :]
print(median_pet_image.shape)
masked_pet_image = np.ma.masked_array(median_pet_image, projected_mask)
if median == 0 or median == 1:
masked_pet_image = np.rot90(masked_pet_image)
median_pet_image = np.rot90(median_pet_image)
# normalize values
vmin = np.min(pet_image)
vmax = np.max(pet_image)
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
fig = plt.figure(fig_num)
plt.clf()
ax = fig.add_subplot(121)
ax.pcolormesh(x, y, median_pet_image, vmin=vmin, vmax=vmax, cmap=cmap)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax = fig.add_subplot(122)
ax.pcolormesh(x, y, masked_pet_image, vmin=vmin, vmax=vmax, cmap=cmap,
rasterized=True, linewidth=0)
ax.set_aspect('equal')
plt.xticks([])
plt.yticks([])
title = "Patient: {} - Slice: {}/{}".format(patient, i + 1, pet_image.shape[2])
title += " - Contour Name: {}".format(mask_name)
fig.canvas.set_window_title("Figure {} - {}".format(fig_num, title))
def plot_pet_image(pet_image, yz_slice_pos, xz_slice_pos, xy_slice_pos, pixel_shape,
pixel_spacing, mask=None):
"""
The transparent option makes all zeros transparent, and all ones red (expects image with only
1s and 0s)
"""
# create axis for plotting
x = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[0], pixel_spacing[0])
y = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[1], pixel_spacing[1])
z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])
if mask is not None:
pet_image = np.ma.masked_array(pet_image, mask)
# create slices that will be shown
yz_slice = pet_image[yz_slice_pos, :, :]
xz_slice = pet_image[:, xz_slice_pos, :]
xy_slice = pet_image[:, :, xy_slice_pos]
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
yz_slice = np.rot90(yz_slice)
xz_slice = np.fliplr(np.rot90(xz_slice))
# normalize values
vmin = min(np.min(yz_slice), np.min(xz_slice), np.min(xy_slice))
vmax = max(np.max(yz_slice), np.max(xz_slice), np.max(xy_slice))
cmap = plt.cm.gray
cmap.set_bad('r', 1)
# show images
plt.figure(0)
plt.clf()
plt.subplot(221)
plt.pcolormesh(y, z, yz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.ylabel('z')
plt.subplot(222)
plt.pcolormesh(x, z, xz_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('x')
plt.subplot(223)
plt.pcolormesh(x, y, xy_slice, vmin=vmin, vmax=vmax, cmap=cmap)
plt.xlabel('y')
plt.ylabel('x')
plt.subplot(224)
plt.axis([0, 5, 0, 4.5])
plt.axis('off')
plt.text(1, 3, "x: {:.4f}".format(yz_slice_pos * pixel_spacing[0]), fontsize=15)
plt.text(1, 2, "y: {:.4f}".format(xz_slice_pos * pixel_spacing[1]), fontsize=15)
plt.text(1, 1, "z: {:.4f}".format(xy_slice_pos * pixel_spacing[2]), fontsize=15)
return vmin, vmax
def find_centroid(image, discretize=False):
# finds centroid of 2D or 3D image
if len(image.shape) == 2:
w, h = image.shape
cumulative = 0
centroid = [0, 0]
for x in range(w):
for y in range(h):
centroid[0] += image[x, y] * x
centroid[1] += image[x, y] * y
cumulative += image[x, y]
centroid = centroid[0] / cumulative, centroid[1] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
elif len(image.shape) == 3:
w, h, d = image.shape
cumulative = 0 | for y in range(h):
for z in range(d):
centroid[0] += image[x, y, z] * x
centroid[1] += image[x, y, z] * y
centroid[2] += image[x, y, z] * z
cumulative += image[x, y, z]
centroid = centroid[0] / cumulative, centroid[1] / cumulative, centroid[2] / cumulative
if discretize:
centroid = tuple([np.round(c) for c in centroid])
return centroid
return None
def get_pet_location(patient, options):
"""
This function holds the exceptional patients: those that have a weird number of PT folders and
have to be specified manually
"""
patient_dictionary = {
"11111774": "FoR_008/Series_001_PT_001",
"11102077": "FoR_005/Series_002_PT_001",
"20100039": "FoR_005/Series_004_PT_001",
"20100052": "FoR_005/Series_004_PT_001",
"20090735": "FoR_002/Series_001_PT_001",
"11112002": "FoR_006/Series_002_PT_001",
"11110941": "FoR_002/Series_001_PT_001",
"20092802": "FoR_006/Series_001_PT_001"
}
if patient not in patient_dictionary:
return None
for op in options:
if op.endswith(patient_dictionary[patient]):
return op
print("Problem found in the dictionary, ignoring patient")
return None
def get_volumes(patient, pet_folder, struct_folders, number, volumes, plot_data=False):
"""
volumes is where the function writes the volumes found
it is a dictionary, where keys are the names of the patients, and each value is a list
where the first element is always the original 3D PET image, and the following are the
contours of the volumes. Every contour is a dict with 4 fields: a mask (3D map of 1s and 0s),
the contour label, a range (the 2 3D position of the opposite corners of the tumor box)
and the folder where the contour was found.
"""
print("--------------------------------------------------------------------------------------")
print("Patient {:02d}: {}".format(number, patient))
# get all dicom image's paths
dicom_images = [pet_folder+"/"+f for f in os.listdir(pet_folder) if f.lower().endswith(".dcm")]
dicom_images.sort()
# get information from dicom header
dicom_info = dicom.read_file(dicom_images[0])
pixel_shape = (int(dicom_info.Rows), int(dicom_info.Columns), int(dicom_info.NumberOfSlices))
pixel_spacing = (float(dicom_info.PixelSpacing[0]), float(dicom_info.PixelSpacing[1]),
float(dicom_info.SliceThickness))
print(" Pixel spacing: {}".format(pixel_spacing))
# create 3D array for pet image
pet_image = np.zeros(pixel_shape, dtype=dicom_info.pixel_array.dtype)
for i, dicom_img in enumerate(dicom_images):
ds = dicom.read_file(dicom_img)
pet_image[:, :, i] = ds.pixel_array
# create contours structure
mtv_variables = []
for struct_folder in struct_folders:
# extract contours labels and index from lvol.txt
lvoltxt_file = struct_folder + "/lvol.txt"
with open(lvoltxt_file) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if ("mtv" in line.lower() and ("cervix" in line.lower() or "tumor" in line.lower()) and
"nodal" not in line.lower() and "nodes" not in line.lower() and
"ring" not in line.lower() and "opt" not in line.lower()):
struct = line.strip().split("|")
mtv_variables.append((int(struct[0]), struct[-1], struct_folder))
# return nothing if no mtv contours were found
if len(mtv_variables) == 0:
return [], volumes, []
# add contours to original image and plot it
prev_folder = None
patient_volumes = [pet_image]
print(" Possible MTV contours:")
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
# read and transform data from nii file
if prev_folder != mtv_folder:
# only read mtv_folder if it has changed
nii_obj = nib.load(mtv_folder + "/lvol.nii")
nii_data = nii_obj.get_data()
volume = np.zeros(nii_data.shape[:3], dtype=int)
for i in range(nii_data.shape[-1]):
volume += nii_data[:, :, :, 0, i] << (8 * i)
volume = np.swapaxes(volume, 0, 1)
volume = np.flip(volume, 2)
print(" * Structures folder: {}".format(mtv_folder.split("/")[-1]))
print(" MTV_index:", mtv_idx)
print(" MTV_label:", mtv_label.split("/")[-1])
prev_folder = mtv_folder
# create 3D matrix with 1s where ROI is and 0s everwhere else
try:
tumor_volume = (np.bitwise_and(volume, 2 ** mtv_idx) > 0) * 1
except TypeError:
print("Error while reading volume for index: {}, label: {}!".format(mtv_idx,
mtv_label))
patient_volumes.append(())
continue
# find bounding box for volume
mask_range = [[pixel_shape[0], pixel_shape[1], pixel_shape[2]], [-1, -1, -1]]
tumor_exists = False
for xx in range(pixel_shape[0]):
for yy in range(pixel_shape[1]):
for zz in range(pixel_shape[2]):
if tumor_volume[xx, yy, zz]:
tumor_exists = True
mask_range[0][0] = min(mask_range[0][0], xx)
mask_range[0][1] = min(mask_range[0][1], yy)
mask_range[0][2] = min(mask_range[0][2], zz)
mask_range[1][0] = max(mask_range[1][0], xx)
mask_range[1][1] = max(mask_range[1][1], yy)
mask_range[1][2] = max(mask_range[1][2], zz)
# continue if the mask is all 0s
if not tumor_exists:
print("Volume not found for index: {}, label: {}!".format(mtv_idx, mtv_label))
patient_volumes.append(())
continue
# Get ROI
current_volume = pet_image[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
current_mask = tumor_volume[mask_range[0][0]:mask_range[1][0]+1,
mask_range[0][1]:mask_range[1][1]+1,
mask_range[0][2]:mask_range[1][2]+1]
# Add volumes to patient_volumes
patient_volumes.append((current_mask, mtv_label, mask_range, mtv_folder))
# Plot volumes
if plot_data:
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=0, fig_num=0,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=1, fig_num=1,
patient=patient, mask_name=mtv_label.split("/")[-1])
plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=2, fig_num=2,
patient=patient, mask_name=mtv_label.split("/")[-1])
input("press ENTER to continue... ")
plot_pet_volume(current_volume, pixel_shape, pixel_spacing, mask=current_mask,
patient=patient, mask_name=mtv_label.split("/")[-1])
volumes[patient] = patient_volumes
return mtv_variables, volumes, pixel_spacing
def parse_arguments(root_path):
"""Parse arguments in code."""
parser = argparse.ArgumentParser(description="The goal of this code is to loop through all "
"the patients in the folder root_path (default: '{}') and "
"show their PET images and their respective MTV shapes "
"(if the plot argument is toggled). The code also saves a "
"file 'volumes.pkl' with the full volumes and MTV shapes. "
"This .pkl file can then be read by "
"'parse_volumes_dataset.py' to generate the final numpy "
"dataset.".format(root_path))
parser.add_argument('-p', '--plot', default=False, action="store_true",
help="show figures before saving them")
parser.add_argument('-rp', '--root_path', default=None, type=str,
help="root path to search for files (default is '{}')".format(root_path))
parser.add_argument('--patients', default=None, type=str,
help="enter the list of patients that you want to see and save, separated"
"with spaces, and surroud them with ' or \" (i.e. 11111874 or "
"'02092013 11110482')")
return parser.parse_args()
if __name__ == "__main__":
# path for all patients
root_path = "/home/dani/Documents/disease-detection/Cervical Radiomic Images"
args = parse_arguments(root_path)
if args.root_path is not None:
root_path = args.root_path
# get all patients in dataset
patient_folders = sorted(next(os.walk(root_path))[1])
if args.patients is not None:
tmp_patients = []
my_patients = args.patients.split()
for patient in patient_folders:
if patient in my_patients:
tmp_patients.append(patient)
patient_folders = tmp_patients
# create structure to ignore patients that have an unexpected folder structure
ignored_patients = {p: False for p in patient_folders}
num_ignored_patients = 0
# loop to get PET folders (contain dicom images) and all structure folders (contain nii files)
pet_folders = {}
num_pet_folders = 0
struct_folders = {}
num_struct_folders = 0
for patient in patient_folders:
pet_scans_per_patient = 0
path = "{}/{}".format(root_path, patient)
FoR_folders = [f for f in next(os.walk(path))[1] if f.startswith("FoR_")]
for folder in FoR_folders:
FoR_path = "{}/{}".format(path, folder)
PT_folders = [FoR_path + "/" + f for f in next(os.walk(FoR_path))[1] if f.find("PT") > -1]
num_pet_folders += len(PT_folders)
pet_scans_per_patient += len(PT_folders)
if patient not in pet_folders:
pet_folders[patient] = []
pet_folders[patient] += PT_folders
if pet_scans_per_patient != 1:
location = get_pet_location(patient, pet_folders[patient])
if location is not None:
pet_folders[patient] = [location]
pet_scans_per_patient = 1
if pet_scans_per_patient != 1:
num_ignored_patients += 1
if pet_scans_per_patient == 0:
print("Patient {} has {} PET images.\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient))
ignored_patients[patient] = "Too few PET images: {}".format(pet_scans_per_patient)
else:
print("Patient {} has {} PET images in: \n{}\nThis patient will be ignored!\n"
"".format(patient, pet_scans_per_patient, "\n".join(pet_folders[patient])))
ignored_patients[patient] = "Too many PET images: {}".format(pet_scans_per_patient)
else:
path = pet_folders[patient][0]
s_folders = [path + "/" + f for f in next(os.walk(path))[1] if f.startswith("struct")]
num_struct_folders += len(s_folders)
struct_folders[patient] = s_folders
print("{} patient folders found.".format(len(patient_folders)))
print("{} PET folders found.".format(num_pet_folders))
print("{} structures folders found.".format(num_struct_folders))
print("{} patients ignored.".format(num_ignored_patients))
# Get all volumes and save them
plt.ion()
contour_names = set()
i = 0
volumes = {}
all_pixel_spacing = []
for patient in patient_folders:
if ignored_patients[patient]: # skip ignored patients
continue
i += 1
# This function does all the volumes extraction, and also plots the tumors
mtv_variables, volumes, spacing = get_volumes(patient, pet_folders[patient][0],
struct_folders[patient], i, volumes,
plot_data=args.plot)
if len(spacing) == 3:
all_pixel_spacing.append(spacing)
# Track all the names found
for mtv_idx, mtv_label, mtv_folder in mtv_variables:
contour_names.add(mtv_label)
# If no contour is detected, add patient to ignored set
if len(mtv_variables) == 0 or len(volumes[patient]) <= 1:
ignored_patients[patient] = "No valid MTV contour found"
num_ignored_patients += 1
print("Patient", patient, "has no MTV contour. \nThis patient will be ignored!\n")
if patient in volumes:
volumes.pop(patient)
plt.ioff()
if args.plot:
all_pixel_spacing = np.array(all_pixel_spacing)
plt.plot(all_pixel_spacing[:, 0], label="width (x)")
plt.plot(all_pixel_spacing[:, 1], label="lenght (y)")
plt.plot(all_pixel_spacing[:, 2], label="height (z)")
plt.legend()
plt.title("Dimensions 3D pixels PET images")
plt.show()
# Print some statistics and data from the extraction
print("UNIQUE LABELS:")
for c in contour_names:
print(c)
print(" ")
print("DATASET STRUCTURE:")
patients = sorted(volumes.keys())
for i, patient in enumerate(patients):
print("Patient {}: {}".format(i, patient))
contents = [volumes[patient][0]]
prev_folder = None
for info in volumes[patient][1:]:
if len(info) == 0:
continue
contents.append(info)
current_mask, mtv_label, mask_range, mtv_folder = info
if prev_folder != mtv_folder:
print("Folder: {}".format(mtv_folder.split("/")[-1]))
prev_folder = mtv_folder
print(mtv_label, " ", mask_range)
print(" ")
volumes[patient] = contents
print("IGNORED PATIENTS:")
i = 0
for patient in ignored_patients:
if ignored_patients[patient] is False:
continue
print("Patient {}: {}".format(i, patient))
print("Reason: {}".format(ignored_patients[patient]))
i += 1
# Save the volumes
print(" ")
if os.path.isfile('volumes.pkl'):
answer = ""
while len(answer) <= 0 or answer[0].strip().lower() != "y":
print("Continuing will overwrite the existing 'volumes.pkl' file.")
answer = input("Type 'y' to overwrite data or Ctrl-C to abort.\n>> ")
print(" ")
print("Saving data, this may take a few minutes")
with open('volumes.pkl', 'wb') as f:
pickle.dump(volumes, f)
print("Data saved in 'volumes.pkl'.") | centroid = [0, 0, 0]
for x in range(w): | random_line_split |
variants.ts | //****************************
// sheet processing magic
//****************************
import warning from 'warning';
export * from './animations/index';
export * from './mediaq';
export * from './when-used';
//****************************
// TYPINGS
//****************************
// consts
export const enum Consts {
$system = '$system',
$switch = '$switch',
path = '#path',
rulesetName = '#rulesetName',
//canModify = '#canModify',
data = '#data',
dataObservedBits = 'observedBits',
}
//****************************
// SHEET HIEARCHY
export interface SheetWithAddIns extends Sheet {
//[Consts.canModify]?
$system: AddIns
}
export type AddIns = { [addInsName: string]: Sheets }
export type Sheets = { [sheetName: string]: Sheet }
export type Sheet = { [rulesetName: string]: Ruleset }
export interface Ruleset extends Node {
[Consts.rulesetName]?: string
}
export type Node = { [ruleName: string]: Node | any }
//****************************
// FILTERS
export type UsedRulesetNames = { [rulesetName: string]: boolean }
export interface AddInRulesetPar {
usedRulesetNames?: UsedRulesetNames
addInSheet: Sheet
}
export type RulesetPatchGetter = (par: AddInRulesetPar) => Ruleset[]
export type RulesetPatchGetters = { [addInName: string]: RulesetPatchGetter }
export type PropsPatchGetter = (par: {}, patches: Array<{}>) => void
export type PropsPatchGetters = { [addInName: string]: PropsPatchGetter }
//****************************
// OTHER
// hook for custom addIn processing
export type FinishAddIn = (addInItem: {}) => void
export type FinishAddIns = { [addInName: string]: FinishAddIn }
//****************************
// EXPORTS FOR PROPS
//****************************
export const finishProps = (root: Sheet, onFinishAddInProps: FinishAddIns) => {
root = linearize(root) // in-place processing of $before, $web, $native and $after ruleset props
extractPropsPatches(root) as SheetWithAddIns // move props (starting with $, e.g. $mediaq) to root.$system
// move $-props (for which onFinishAddInProps exists) to root.$system.addIns
if (root.$system && onFinishAddInProps)
for (const p in root.$system) {
const finish = onFinishAddInProps[p]
if (finish) {
finish(root.$system[p])
//const addIns = root.$system.addIns || (root.$system.addIns = {})
//addIns[p] = finish(root.$system[p])
//delete root.$system[p]
}
}
return root
}
export const getPropsPatch = (addInsRoot: AddIns /*addInsRoot is not mutated*/, propsPatchGetters: PropsPatchGetters) => {
if (!propsPatchGetters) return null
const res = []
for (const p in addInsRoot) {
const addIn = addInsRoot[p], proc = propsPatchGetters[p]
if (!addIn || !proc) continue
proc(addIn, res)
}
return res.length === 0 ? null : res
}
//****************************
// EXPORTS FOR SHEET
//****************************
//export const setCanModify = (root: SheetWithAddIns) => root[Consts.canModify] = true
// transform sheet to mergable and patchable form. !!! root is mutated !!!
export const toPatchableAndMergeable = (root: Sheet) => {
root = linearize(root) // in-place processing of $before, $web, $native and $after ruleset props
const res = extractPatches(root, root as SheetWithAddIns, []) as SheetWithAddIns // extract addIn props of ruleset (starting with $, e.g. $mediaq) etc.
return res
}
// merging patchable and mergeable sheets
export const mergeSheets = (sheet: SheetWithAddIns, modifiers: SheetWithAddIns[], canModify: boolean) => {
// deep merge
if (modifiers && modifiers.length >= 1) sheet = canModify ? deepMerges(sheet, modifiers) : immutableMerge([sheet, ...modifiers]) | return sheet
}
export const mergeSheetsAndFinish = (sheet: SheetWithAddIns, modifiers: SheetWithAddIns[], onFinishAddInClasses: FinishAddIns, canModify?: boolean) => {
// deep merge
sheet = mergeSheets(sheet, modifiers, canModify)
sheet = finishAddInsClasses(sheet, onFinishAddInClasses, canModify)
nameRulesets(sheet)
return sheet
}
// merge rulesets in component code (and apply addIn patches)
export const mergeRulesetsForCode = (sheet: SheetWithAddIns, rulesetPatchGetters: RulesetPatchGetters, rulesets: Ruleset[]) => {
if (!rulesets || (rulesets = rulesets.filter(r => !!r)).length === 0) return null
const addIns = sheet.$system
// get used ruleset's (for $whenUses processing)
const $switch = addIns && addIns.$switch
let usedRulesets: UsedRulesetNames = null
if ($switch) rulesets.forEach(ruleset => {
if (!ruleset || !ruleset[Consts.rulesetName]) return
(usedRulesets || (usedRulesets = {}))[ruleset[Consts.rulesetName]] = true
})
// apply patch to rulesets
let firstIsReadOnly = true
let patchedRulesets = rulesets
const patches = addIns && getPatches(addIns, rulesetPatchGetters, usedRulesets) // compute actual patches (based on addIn filters and usedRulesets)
if (patches) {
patchedRulesets = []
rulesets.forEach((ruleset, idx) => {
if (!ruleset) return
if (!ruleset[Consts.rulesetName]) { patchedRulesets.push(ruleset); return } // not named ruleset
const myPatches = patches.filter(p => p.patchPath[0] === ruleset[Consts.rulesetName]) // filter patches for this ruleset
if (myPatches.length === 0) { patchedRulesets.push(ruleset); return } // no patches
ruleset = deepMerge({}, ruleset) // deep clone
if (idx === 0) firstIsReadOnly = false // first ruleset is not readonly (=> can delete $rulesetName prop)
myPatches.forEach(patch => {
const patchPlace = findPath(ruleset, patch.patchPath, 1) // find sub-object of ruleset
deepMerges(patchPlace, patch.rulesets) // path it
})
patchedRulesets.push(ruleset)
})
}
if (patchedRulesets.length === 0) return null
// merging of used rulesets
let res: Ruleset = patchedRulesets.length === 1 ? patchedRulesets[0] : (firstIsReadOnly ? immutableMerge(patchedRulesets) : deepMerges(patchedRulesets[0], patchedRulesets.slice(1)))
// remove $rulesetName from result
if (res[Consts.rulesetName]) {
if (res === patchedRulesets[0] && firstIsReadOnly) res = { ...res }
delete res[Consts.rulesetName]
}
return res
}
//****************************
// HELPER EXPORTS
//****************************
export const filterRulesetNames = (sheet: Sheet) => Object.keys(sheet).filter(k => k.charAt(0) != '#')
//see processAddIn
//https://stackoverflow.com/questions/1173549/how-to-determine-if-an-object-is-an-object-literal-in-javascript
export const isObject = obj => !!(obj && typeof obj === 'object' && !obj.$$typeof && Object.getPrototypeOf(obj) === Object.prototype)
//****************************
// PRIVATE
//****************************
const nameRulesets = (sheet: SheetWithAddIns) => {
//if (!sheet.$system) return
const ignore = { '$': true, '#': true }
//if (sheet.$system)
for (const p in sheet) if (!ignore[p.charAt(0)]) sheet[p][Consts.rulesetName] = p
}
const finishAddInsClasses = (sheet: SheetWithAddIns, onFinishAddInClasses: FinishAddIns, canModify: boolean) => {
if (!sheet.$system || !onFinishAddInClasses) return sheet
//const canModify = sheet[Consts.canModify]
// clone when needed
if (!canModify) sheet = { ...sheet, $system: { ...sheet.$system } }
for (const addInName in sheet.$system) {
const proc = onFinishAddInClasses[addInName]
if (proc) {
let addInItem = sheet.$system[addInName]
if (!canModify) addInItem = sheet.$system[addInName] = { ...addInItem } // clone
proc(addInItem)
}
}
return sheet
}
//****************************
// GET PATCHES
type Patch = { patchPath: string[], rulesets: Node[] }
// For mergeRulesets: compute actual patches (based on addIn filters and usedRulesets)
// addInsRoot is not mutated
const getPatches = (addInsRoot: AddIns, addInRulesetFilters: RulesetPatchGetters, usedRulesets: UsedRulesetNames) => {
let res: Patch[]
try {
res = getPatchLow(addInsRoot, addInRulesetFilters, usedRulesets, false) // optimistic: addIns are not recured => addInsRoot is not mutated
} catch (error) {
if (error != getPatchLowWithDeepClone) throw error
res = getPatchLow(deepMerge({}, addInsRoot), addInRulesetFilters, usedRulesets, true) // recursion occurred => make deep copy of addInsRoot a try again
}
return res
}
const getPatchLow = (addInsRoot: AddIns /*addInsRoot is not mutated*/, rulesetPatchGetters: RulesetPatchGetters, usedRulesetNames: UsedRulesetNames, canModify: boolean) => {
if (!addInsRoot || !rulesetPatchGetters) return null
let rootPatches: Patch[] = [] // patches of top level sheet rulesets
let addInPatches: Patch[] = [] // pathes for inner addIns rulesets
for (const addInName in addInsRoot) {
const addInSheets = addInsRoot[addInName] // addInKey is e.g $switch, $mediaq...
// get addIn ruleset filter
const filter = rulesetPatchGetters[addInName]
if (!filter) continue
//warning(filter, `Missing filter for ${addInName} addIn`)
for (const sheetName in addInSheets) {
// prepare patch for single patch, patchKey = e.g. "root/:active", "add-ins/$switch/add-ins/$mediaq/root/:active/480-640/b/:hover", atc
const addInSheet = addInSheets[sheetName]
const patchPath = addInSheet[Consts.data].path as any as string[]
const isAddIn = patchPath[0] === Consts.$system // path starts with addIns/...
if (!canModify && isAddIn) throw getPatchLowWithDeepClone // cannot modify and patch of addIn occurred => try again with canModify=true (I think that aAddIn recursion will be rare)
//const rulesets = filter({ addInSheet: addInSheets as any, usedRulesetNames })
const rulesets = filter({ addInSheet, usedRulesetNames })
if (!rulesets || rulesets.length === 0) continue
//const items = { path: patchPath, items: [] }
const patch: Patch = { patchPath, rulesets }
const addPatches = isAddIn ? addInPatches : rootPatches
addPatches.push(patch)
}
}
if (rootPatches.length === 0) return null
if (addInPatches.length === 0) return rootPatches
// deep merge addInPatches
addInPatches.sort((p1, p2) => p2.patchPath.length - p1.patchPath.length)
addInPatches.forEach(p => {
const patchPlace = findPath(addInsRoot, p.patchPath, 1)
if (patchPlace) deepMerges(patchPlace, p.rulesets)
})
// for DUMP
if (window.__DEV__) {
const develop = {
rootPatches,
addInPatches
}
}
// return root patches
return rootPatches
}
const getPatchLowWithDeepClone = 'getPatchLowWithDeepClone'
const findPath = (root: Node, path: string[], startIdx?: number) => {
for (let i = startIdx || 0; i < path.length; i++) root = root[path[i]] as Node
return root
}
//****************************
// LINEARIZE
const linearProps = ['$before', '$self', '$web', '$native', '$after']
// process $before, $web, $native and $after props. !!! root is mutated !!!
export const linearize = (root: Node) => {
let single: Node = null, array: Node[] = null, self: Node = null
linearProps.forEach((p, idx) => {
// ignore wrong platform
if (window.isWeb && p === '$native' || !window.isWeb && p === '$web') { delete root[p]; return }
// get value
let value
if (p === '$self')
self = value = root
else {
value = root[p]; if (!value) return
delete root[p]
warning(isObject(value), 'Object expected for $before, $web, $native or $after props')
value = linearize(value)
}
// use value
if (!single) single = value // first
else if (!array) array = [value] //second
else array.push(value) // third and more
})
for (const pp in self) {
const value = self[pp]
if (isObject(value)) self[pp] = linearize(value as Node)
}
return array ? deepMerges(single, array) : single
}
//****************************
// EXTRACT PATCHES FROM SHEET TO ADD INS
const extractPropsPatches = (node: Node) => {
const addIns = node.$system || (node.$system = {})
for (const nodePropName in node) {
if (nodePropName.charAt(0) !== '$' || nodePropName === '$system') continue
addIns[nodePropName] = node[nodePropName]
delete node[nodePropName]
}
return node
}
// extrach $??? addIn parts of ruleset and put them to root.addIns
const extractPatches = (node: Node, root: SheetWithAddIns, nodePath: string[]) => {
for (const nodePropName in node) {
if (nodePropName === '$system') continue
const subNode = node[nodePropName]
if (!isObject(subNode)) continue
if (nodePropName.charAt(0) === '$') {
delete node[nodePropName]
if (nodePath.length === 0) {
const addIns = root.$system || (root.$system = {})
addIns[nodePropName] = subNode
} else
processAddIn(subNode as Node, nodePropName, node, root, nodePath)
} else
node[nodePropName] = extractPatches(subNode as Node, root, [...nodePath, nodePropName])
}
return node
}
const processAddIn = (addInNode, addInName, parentNode, root, addInNodePath) => {
// adjust addIn, e.g. root.addIns.$switch
const addIns = root.$system || (root.$system = {})
const addIn = addIns[addInName] || (addIns[addInName] = {})
// path
const actPathStr = addInNodePath.join('/')
const path = [Consts.$system, addInName, actPathStr]
// create addIn value
const oldValue = addIn[actPathStr]
const newValue = extractPatches(addInNode, root, path)
const newNode = addIn[actPathStr] = oldValue ? deepMerge(oldValue, newValue) : newValue
// extends with path
newNode[Consts.data] = { path: addInNodePath }
}
//****************************
// DEEP MERGES
// !!! modify target !!!
export const deepMerges = (target, sources: Node[]) => {
if (!sources || sources.length === 0) return target
sources.forEach(source => deepMerge(target, source))
return target
}
//simple deep merge. !!! modify target !!!
export const deepMerge = (target, source) => {
if (!source) return target
for (const key in source) {
const sourcep = source[key], targetp = target[key], sourceObj = isObject(sourcep), targetObj = isObject(targetp)
warning(!targetp || sourceObj === targetObj, 'deepMerge: cannot merge object and non object')
target[key] = sourceObj ? deepMerge(targetp || {}, sourcep) : sourcep
}
return target
}
// deep merge for case when first source (sources[0]) is large object (e.g. component sheet) and other sources are small patches of sources[0]
export const immutableMerge = (sources: Node[]) => {
if (!sources) return null
if (sources.length === 1) return sources[0]
let count = 0
let isToMerge = false
let dest = null
const objectsToMerge: { [propName: string]: Array<Node> } = {} // array of objects for object property
sources.forEach(src => {
if (!src) return
count++
switch (count) {
case 1: dest = src; break // first
case 2: dest = { ...dest } // !!! does not break thi case, continue with merging on flat clone
default: // merge flat cloned dest with src
for (const propName in src) {
if (propName.charAt(0) === '#') continue
const destp = dest[propName], srcp = src[propName] as Node
if (!destp) { dest[propName] = srcp; continue } // set first scalar or object
const isDestpObj = isObject(destp), isSrcpObj = isObject(srcp)
warning(isSrcpObj === isDestpObj, 'Cannot merge object with non-object')
if (!isSrcpObj) { dest[propName] = srcp; continue } // override scalar
// two or more objects for 'propName' property
isToMerge = true
let canModifyp = objectsToMerge[propName]
if (!canModifyp) canModifyp = objectsToMerge[propName] = [dest[propName]] // second object => put first (which is in dest[propName])
canModifyp.push(srcp) // push second or more
}
}
})
if (isToMerge) // some prop has at least two object for merging => recursion
for (const p in objectsToMerge) dest[p] = immutableMerge(objectsToMerge[p])
return dest
} | random_line_split | |
DLModeler.py | from processing.DLDataEngineering import DLDataEngineering
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import numpy as np
import h5py
import os
from scipy.ndimage import gaussian_filter
#Deep learning packages
import tensorflow as tf
#from tensorflow import keras
from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply
from tensorflow.keras.backend import max
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow import keras
from sklearn.metrics import f1_score,roc_auc_score
import matplotlib.pyplot as plt
import cartopy.feature as cf
import cartopy.crs as ccrs
import cartopy
from keras_unet_collection import models, base, utils
class DLModeler(object):
def __init__(self,model_path,hf_path,num_examples,
class_percentages,predictors,model_args,
model_type):
self.model_path = model_path
self.hf_path = hf_path
self.num_examples = num_examples
self.class_percentages = class_percentages
self.model_args = model_args
self.model_type = model_type
long_predictors = []
#Shorten predictor names
for predictor in predictors:
if "_" in predictor:
predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]
elif " " in predictor:
predictor_name = ''.join([v[0].upper() for v in predictor.split()])
else: predictor_name = predictor
long_predictors.append(predictor_name)
self.predictors = np.array(long_predictors)
#Class to read data and standardize
self.dldataeng = DLDataEngineering(self.model_path,self.hf_path,
self.num_examples,self.class_percentages,self.predictors,
self.model_args)
return
def train_models(self,member,train_dates,valid_dates):
"""
Function that reads and extracts pre-processed 2d member data
from an ensemble to train a convolutional neural net (cnn) or
UNET.
The model data is standardized before being input to the cnn,
with the observation data in the shape (# examples, # classes).
Args:
member (str): ensemble member data that trains a DL model
"""
train_data, train_label = self.dldataeng.extract_training_data(member,
train_dates,self.model_type)
#valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)
valid_data, valid_label = [],[]
if self.model_type == 'CNN':
onehot_encoder = OneHotEncoder(sparse=False,categories='auto')
encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))
self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)
elif 'UNET' in self.model_type:
#train_label[train_label >= 50.] = 50.
#log_train_label = np.log((train_label+1.0))
self.train_UNET(member,train_data,train_label,valid_data,valid_label)
return
def | (self,member,trainX,trainY,validX,validY):
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
'''
if os.path.exists(model_file):
del trainX,trainY,validX,validY
unet = tf.keras.models.load_model(model_file,compile=False)
print(f'\nOpening {model_file}\n')
#self.validate_UNET(model,validX,validY,threshold_file)
return
'''
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
#print('Validation data shape {0}'.format(np.shape(validX)))
#print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1,
'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU',
'output_activation':'ReLU', 'batch_norm':False, 'pool':True,
'unpool':False, 'name':f'{self.model_type}'}
if self.model_type == 'UNET':
model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]
unet_model_obj = models.unet_2d
compile_params = {'loss': 'mean_squared_error'}
else:
compile_params = {'loss': ['mean_squared_error',
'mean_squared_error','mean_squared_error',
'mean_squared_error','mean_squared_error'],
'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}
if self.model_type == 'UNET2plus':
plus_model_params = {'filter_num':[16, 32, 64, 128, 256],
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_plus_2d
elif self.model_type == 'UNET3plus':
plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],
'filter_num_skip':'auto', 'filter_num_aggregate':'auto',
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_3plus_2d
try: unet_model = unet_model_obj(**model_obj_params)
except:
print(f"{self.model_type} Model type not found.")
return
unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))
print(unet_model.summary())
#Augment data
aug = ImageDataGenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
#Fit UNET
n_epochs = 15
bs = 256
conv_hist = unet_model.fit(
aug.flow(trainX,trainY,batch_size=bs),
steps_per_epoch=len(trainX)/bs,
epochs=n_epochs,verbose=1)
'''
pred_s = trainX[0].reshape(1,input_shape[0],
input_shape[1],input_shape[2])
prediction = unet.predict(pred_s)[0,:,:,:]
print(prediction.shape)
plt.imshow(prediction)
plt.colorbar()
plt.show()
return
'''
#Save trained model
unet_model.save(model_file)
print(f'Writing out {model_file}')
#Clear graphs
tf.keras.backend.clear_session()
#self.validate_UNET(model,validX,validY,threshold_file)
return
def train_CNN(self,member,input_data):
"""
Function to train a convolutional neural net (CNN) for random
training data and associated labels.
Args:
member (str): Ensemble member
trainX (tuple): Tuple of (train data, train labels,
validation data, validation labels)
"""
trainX,trainY,validX,validY = input_data
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
print('Validation data shape {0}'.format(np.shape(validX)))
print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'
print(model_file)
if not os.path.exists(model_file):
# Clear graphs
tf.keras.backend.clear_session()
#Initiliaze Convolutional Neural Net (CNN)
model = models.Sequential()
input_shape = np.shape(trainX[0])
#First layer: input shape (y,x,# variables)
#Add noise
model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))
for filters in [32,64,128]:
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.MaxPooling2D())
#Flatten the last convolutional layer
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.Dense(4,activation='softmax'))
#Compile neural net
model.compile(optimizer='adam',loss='categorical_crossentropy',
metrics=[tf.keras.metrics.AUC()])
print(model.summary())
#fit neural net
n_epochs = 10
bs = 256
#augment data
aug = imagedatagenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
train_generator = aug.flow(trainx,trainy,batch_size=bs)
conv_hist = model.fit(
train_generator,steps_per_epoch=len(trainx) // bs,
epochs=n_epochs,verbose=1,class_weight=self.class_percentages)
#save trained model
model.save(model_file)
print(f'Writing out {model_file}')
else:
model = tf.keras.models.load_model(model_file)
print(f'\nOpening {model_file}\n')
del trainY,trainX
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if os.path.exists(threshold_file):
del validX,validY
return
self.validate_CNN(model,validX,validY,threshold_file)
return
def validate_CNN(self,model,validX,validY,threshold_file):
print()
#Predict on validation data
cnn_preds = model.predict(validX)
sev_hail = cnn_preds[:,2]
sig_hail = cnn_preds[:,3]
#combine the severe hail and sig severe hail classes
sev_prob_preds = sev_hail+sig_hail
print('Max probability',np.nanmax(sev_prob_preds))
#classify labels as severe hail or no hail
true_preds = np.where(validY >= 2, 1, 0)
del validX, validY
df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold'])
#Find threshold with the highest validation AUC score
auc_score = []
thresholds = np.arange(0.1,1.01,0.02)
for t in thresholds:
threshold_preds = np.where(sev_prob_preds >= t,1,0)
auc_score.append(roc_auc_score(true_preds, threshold_preds))
print(auc_score)
#output threshold with highest AUC
df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
print(df_best_score)
df_best_score.to_csv(threshold_file)
print(f'Writing out {threshold_file}')
return
def predict_model(self,member,patch_map_conversion_indices,
total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#):
lon_grid,lat_grid):
"""
Function that opens a pre-trained convolutional neural net (cnn).
and predicts hail probability forecasts for a single ensemble member.
Args:
Right now only includes severe hail prediction, not sig-severe
"""
##################
# Load in any saved DL model files
##################
#Clear any saved DL graphs
tf.keras.backend.clear_session()
#Load DL model
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
DL_model = tf.keras.models.load_model(model_file,compile=False)
if self.model_type == 'CNN':
#Use minimum prob threshold chosen with validation data
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if not os.path.exists(threshold_file):
print('No thresholds found')
return
prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05
print(prob_thresh)
total_count = 0
##################
#Extract forecast data (#hours, #patches, nx, ny, #variables)
##################
forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None])
if forecast_data is None:
print('No forecast data found')
return
##################
# Standardize hourly data
##################
standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour])
for hour in np.arange(forecast_data.shape[0])])
del forecast_data
##################
# Produce gridded hourly hail forecast
##################
total_grid = np.empty( (standard_forecast_data.shape[0],
total_map_shape[0]*total_map_shape[1]) )*np.nan
for hour in np.arange(standard_forecast_data.shape[0]):
print(hour)
#Predict probability of severe hail
DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))
######
# Will need to fix CNN code to reflect the conversion inds are in
#patches x (patch_radius*patch_radius) instead of (patches*radius*radius)
#####
if self.model_type == 'CNN':
severe_proba_indices = np.where( (cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0]
severe_patches = np.zeros(subset_map_shape)
#If no hourly severe hail predicted, continue
if len(severe_proba_indices) <1 : continue
severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1)
total_grid[hour,map_conversion_inds] = severe_patches.ravel()
print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3])))
total_count += len(severe_proba_indices)
print('Total severe probs:',total_count)
print()
elif 'UNET' in self.model_type:
for patch in np.arange(standard_forecast_data.shape[1]):
patch_indices = patch_map_conversion_indices[patch]
#Gets rid of overlapping edges
overlap_pt = 4
# If unet3+ then the last output tensor is the correct one
if DL_prediction.ndim > 4:
hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
else:
hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
total_grid[hour,patch_indices] = hourly_patch_data
del DL_prediction
del standard_forecast_data
output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape)
date_outpath = forecast_grid_path + f'{date[0][:-5]}/'
#Output gridded forecasts
if not os.path.exists(date_outpath): os.makedirs(date_outpath)
gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'
print(f'Writing out {gridded_out_file}')
with h5py.File(gridded_out_file, 'w') as hf:
hf.create_dataset("data",data=output_data,
compression='gzip',compression_opts=6)
return
def dice_loss(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.math.sigmoid(y_pred)
numerator = 2 * tf.reduce_sum(y_true * y_pred)
denominator = tf.reduce_sum(y_true + y_pred)
return 1 - numerator / denominator
'''
From: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/
'''
def down_block(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
p = layers.MaxPooling2D((2,2))(c)
return c, p
def up_block(x, skip, filters, kernel_size=(3, 3)):
up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)
concat = layers.Concatenate()([up, skip])
c = layers.Conv2D(filters, kernel_size, padding='same')(concat)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
def bottleneck(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
| train_UNET | identifier_name |
DLModeler.py | from processing.DLDataEngineering import DLDataEngineering
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import numpy as np
import h5py
import os
from scipy.ndimage import gaussian_filter
#Deep learning packages
import tensorflow as tf
#from tensorflow import keras
from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply
from tensorflow.keras.backend import max
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow import keras
from sklearn.metrics import f1_score,roc_auc_score
import matplotlib.pyplot as plt
import cartopy.feature as cf
import cartopy.crs as ccrs
import cartopy
from keras_unet_collection import models, base, utils
class DLModeler(object):
def __init__(self,model_path,hf_path,num_examples,
class_percentages,predictors,model_args,
model_type):
self.model_path = model_path
self.hf_path = hf_path
self.num_examples = num_examples
self.class_percentages = class_percentages
self.model_args = model_args
self.model_type = model_type
long_predictors = []
#Shorten predictor names
for predictor in predictors:
if "_" in predictor:
predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]
elif " " in predictor:
predictor_name = ''.join([v[0].upper() for v in predictor.split()])
else: predictor_name = predictor
long_predictors.append(predictor_name)
self.predictors = np.array(long_predictors)
#Class to read data and standardize
self.dldataeng = DLDataEngineering(self.model_path,self.hf_path,
self.num_examples,self.class_percentages,self.predictors,
self.model_args)
return
def train_models(self,member,train_dates,valid_dates):
"""
Function that reads and extracts pre-processed 2d member data
from an ensemble to train a convolutional neural net (cnn) or
UNET.
The model data is standardized before being input to the cnn,
with the observation data in the shape (# examples, # classes).
Args:
member (str): ensemble member data that trains a DL model
"""
train_data, train_label = self.dldataeng.extract_training_data(member,
train_dates,self.model_type)
#valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)
valid_data, valid_label = [],[]
if self.model_type == 'CNN':
onehot_encoder = OneHotEncoder(sparse=False,categories='auto')
encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))
self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)
elif 'UNET' in self.model_type:
#train_label[train_label >= 50.] = 50.
#log_train_label = np.log((train_label+1.0))
self.train_UNET(member,train_data,train_label,valid_data,valid_label)
return
def train_UNET(self,member,trainX,trainY,validX,validY):
|
def train_CNN(self,member,input_data):
"""
Function to train a convolutional neural net (CNN) for random
training data and associated labels.
Args:
member (str): Ensemble member
trainX (tuple): Tuple of (train data, train labels,
validation data, validation labels)
"""
trainX,trainY,validX,validY = input_data
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
print('Validation data shape {0}'.format(np.shape(validX)))
print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'
print(model_file)
if not os.path.exists(model_file):
# Clear graphs
tf.keras.backend.clear_session()
#Initiliaze Convolutional Neural Net (CNN)
model = models.Sequential()
input_shape = np.shape(trainX[0])
#First layer: input shape (y,x,# variables)
#Add noise
model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))
for filters in [32,64,128]:
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.MaxPooling2D())
#Flatten the last convolutional layer
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.Dense(4,activation='softmax'))
#Compile neural net
model.compile(optimizer='adam',loss='categorical_crossentropy',
metrics=[tf.keras.metrics.AUC()])
print(model.summary())
#fit neural net
n_epochs = 10
bs = 256
#augment data
aug = imagedatagenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
train_generator = aug.flow(trainx,trainy,batch_size=bs)
conv_hist = model.fit(
train_generator,steps_per_epoch=len(trainx) // bs,
epochs=n_epochs,verbose=1,class_weight=self.class_percentages)
#save trained model
model.save(model_file)
print(f'Writing out {model_file}')
else:
model = tf.keras.models.load_model(model_file)
print(f'\nOpening {model_file}\n')
del trainY,trainX
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if os.path.exists(threshold_file):
del validX,validY
return
self.validate_CNN(model,validX,validY,threshold_file)
return
def validate_CNN(self,model,validX,validY,threshold_file):
print()
#Predict on validation data
cnn_preds = model.predict(validX)
sev_hail = cnn_preds[:,2]
sig_hail = cnn_preds[:,3]
#combine the severe hail and sig severe hail classes
sev_prob_preds = sev_hail+sig_hail
print('Max probability',np.nanmax(sev_prob_preds))
#classify labels as severe hail or no hail
true_preds = np.where(validY >= 2, 1, 0)
del validX, validY
df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold'])
#Find threshold with the highest validation AUC score
auc_score = []
thresholds = np.arange(0.1,1.01,0.02)
for t in thresholds:
threshold_preds = np.where(sev_prob_preds >= t,1,0)
auc_score.append(roc_auc_score(true_preds, threshold_preds))
print(auc_score)
#output threshold with highest AUC
df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
print(df_best_score)
df_best_score.to_csv(threshold_file)
print(f'Writing out {threshold_file}')
return
def predict_model(self,member,patch_map_conversion_indices,
total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#):
lon_grid,lat_grid):
"""
Function that opens a pre-trained convolutional neural net (cnn).
and predicts hail probability forecasts for a single ensemble member.
Args:
Right now only includes severe hail prediction, not sig-severe
"""
##################
# Load in any saved DL model files
##################
#Clear any saved DL graphs
tf.keras.backend.clear_session()
#Load DL model
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
DL_model = tf.keras.models.load_model(model_file,compile=False)
if self.model_type == 'CNN':
#Use minimum prob threshold chosen with validation data
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if not os.path.exists(threshold_file):
print('No thresholds found')
return
prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05
print(prob_thresh)
total_count = 0
##################
#Extract forecast data (#hours, #patches, nx, ny, #variables)
##################
forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None])
if forecast_data is None:
print('No forecast data found')
return
##################
# Standardize hourly data
##################
standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour])
for hour in np.arange(forecast_data.shape[0])])
del forecast_data
##################
# Produce gridded hourly hail forecast
##################
total_grid = np.empty( (standard_forecast_data.shape[0],
total_map_shape[0]*total_map_shape[1]) )*np.nan
for hour in np.arange(standard_forecast_data.shape[0]):
print(hour)
#Predict probability of severe hail
DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))
######
# Will need to fix CNN code to reflect the conversion inds are in
#patches x (patch_radius*patch_radius) instead of (patches*radius*radius)
#####
if self.model_type == 'CNN':
severe_proba_indices = np.where( (cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0]
severe_patches = np.zeros(subset_map_shape)
#If no hourly severe hail predicted, continue
if len(severe_proba_indices) <1 : continue
severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1)
total_grid[hour,map_conversion_inds] = severe_patches.ravel()
print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3])))
total_count += len(severe_proba_indices)
print('Total severe probs:',total_count)
print()
elif 'UNET' in self.model_type:
for patch in np.arange(standard_forecast_data.shape[1]):
patch_indices = patch_map_conversion_indices[patch]
#Gets rid of overlapping edges
overlap_pt = 4
# If unet3+ then the last output tensor is the correct one
if DL_prediction.ndim > 4:
hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
else:
hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
total_grid[hour,patch_indices] = hourly_patch_data
del DL_prediction
del standard_forecast_data
output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape)
date_outpath = forecast_grid_path + f'{date[0][:-5]}/'
#Output gridded forecasts
if not os.path.exists(date_outpath): os.makedirs(date_outpath)
gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'
print(f'Writing out {gridded_out_file}')
with h5py.File(gridded_out_file, 'w') as hf:
hf.create_dataset("data",data=output_data,
compression='gzip',compression_opts=6)
return
def dice_loss(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.math.sigmoid(y_pred)
numerator = 2 * tf.reduce_sum(y_true * y_pred)
denominator = tf.reduce_sum(y_true + y_pred)
return 1 - numerator / denominator
'''
From: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/
'''
def down_block(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
p = layers.MaxPooling2D((2,2))(c)
return c, p
def up_block(x, skip, filters, kernel_size=(3, 3)):
up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)
concat = layers.Concatenate()([up, skip])
c = layers.Conv2D(filters, kernel_size, padding='same')(concat)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
def bottleneck(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
| model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
'''
if os.path.exists(model_file):
del trainX,trainY,validX,validY
unet = tf.keras.models.load_model(model_file,compile=False)
print(f'\nOpening {model_file}\n')
#self.validate_UNET(model,validX,validY,threshold_file)
return
'''
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
#print('Validation data shape {0}'.format(np.shape(validX)))
#print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1,
'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU',
'output_activation':'ReLU', 'batch_norm':False, 'pool':True,
'unpool':False, 'name':f'{self.model_type}'}
if self.model_type == 'UNET':
model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]
unet_model_obj = models.unet_2d
compile_params = {'loss': 'mean_squared_error'}
else:
compile_params = {'loss': ['mean_squared_error',
'mean_squared_error','mean_squared_error',
'mean_squared_error','mean_squared_error'],
'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}
if self.model_type == 'UNET2plus':
plus_model_params = {'filter_num':[16, 32, 64, 128, 256],
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_plus_2d
elif self.model_type == 'UNET3plus':
plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],
'filter_num_skip':'auto', 'filter_num_aggregate':'auto',
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_3plus_2d
try: unet_model = unet_model_obj(**model_obj_params)
except:
print(f"{self.model_type} Model type not found.")
return
unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))
print(unet_model.summary())
#Augment data
aug = ImageDataGenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
#Fit UNET
n_epochs = 15
bs = 256
conv_hist = unet_model.fit(
aug.flow(trainX,trainY,batch_size=bs),
steps_per_epoch=len(trainX)/bs,
epochs=n_epochs,verbose=1)
'''
pred_s = trainX[0].reshape(1,input_shape[0],
input_shape[1],input_shape[2])
prediction = unet.predict(pred_s)[0,:,:,:]
print(prediction.shape)
plt.imshow(prediction)
plt.colorbar()
plt.show()
return
'''
#Save trained model
unet_model.save(model_file)
print(f'Writing out {model_file}')
#Clear graphs
tf.keras.backend.clear_session()
#self.validate_UNET(model,validX,validY,threshold_file)
return | identifier_body |
DLModeler.py | from processing.DLDataEngineering import DLDataEngineering
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import numpy as np
import h5py
import os
from scipy.ndimage import gaussian_filter
#Deep learning packages
import tensorflow as tf
#from tensorflow import keras
from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply
from tensorflow.keras.backend import max
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow import keras
from sklearn.metrics import f1_score,roc_auc_score
import matplotlib.pyplot as plt
import cartopy.feature as cf
import cartopy.crs as ccrs
import cartopy
from keras_unet_collection import models, base, utils
class DLModeler(object):
def __init__(self,model_path,hf_path,num_examples,
class_percentages,predictors,model_args,
model_type):
self.model_path = model_path
self.hf_path = hf_path
self.num_examples = num_examples
self.class_percentages = class_percentages
self.model_args = model_args
self.model_type = model_type
long_predictors = []
#Shorten predictor names
for predictor in predictors:
|
self.predictors = np.array(long_predictors)
#Class to read data and standardize
self.dldataeng = DLDataEngineering(self.model_path,self.hf_path,
self.num_examples,self.class_percentages,self.predictors,
self.model_args)
return
def train_models(self,member,train_dates,valid_dates):
"""
Function that reads and extracts pre-processed 2d member data
from an ensemble to train a convolutional neural net (cnn) or
UNET.
The model data is standardized before being input to the cnn,
with the observation data in the shape (# examples, # classes).
Args:
member (str): ensemble member data that trains a DL model
"""
train_data, train_label = self.dldataeng.extract_training_data(member,
train_dates,self.model_type)
#valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)
valid_data, valid_label = [],[]
if self.model_type == 'CNN':
onehot_encoder = OneHotEncoder(sparse=False,categories='auto')
encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))
self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)
elif 'UNET' in self.model_type:
#train_label[train_label >= 50.] = 50.
#log_train_label = np.log((train_label+1.0))
self.train_UNET(member,train_data,train_label,valid_data,valid_label)
return
def train_UNET(self,member,trainX,trainY,validX,validY):
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
'''
if os.path.exists(model_file):
del trainX,trainY,validX,validY
unet = tf.keras.models.load_model(model_file,compile=False)
print(f'\nOpening {model_file}\n')
#self.validate_UNET(model,validX,validY,threshold_file)
return
'''
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
#print('Validation data shape {0}'.format(np.shape(validX)))
#print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1,
'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU',
'output_activation':'ReLU', 'batch_norm':False, 'pool':True,
'unpool':False, 'name':f'{self.model_type}'}
if self.model_type == 'UNET':
model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]
unet_model_obj = models.unet_2d
compile_params = {'loss': 'mean_squared_error'}
else:
compile_params = {'loss': ['mean_squared_error',
'mean_squared_error','mean_squared_error',
'mean_squared_error','mean_squared_error'],
'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}
if self.model_type == 'UNET2plus':
plus_model_params = {'filter_num':[16, 32, 64, 128, 256],
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_plus_2d
elif self.model_type == 'UNET3plus':
plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],
'filter_num_skip':'auto', 'filter_num_aggregate':'auto',
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_3plus_2d
try: unet_model = unet_model_obj(**model_obj_params)
except:
print(f"{self.model_type} Model type not found.")
return
unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))
print(unet_model.summary())
#Augment data
aug = ImageDataGenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
#Fit UNET
n_epochs = 15
bs = 256
conv_hist = unet_model.fit(
aug.flow(trainX,trainY,batch_size=bs),
steps_per_epoch=len(trainX)/bs,
epochs=n_epochs,verbose=1)
'''
pred_s = trainX[0].reshape(1,input_shape[0],
input_shape[1],input_shape[2])
prediction = unet.predict(pred_s)[0,:,:,:]
print(prediction.shape)
plt.imshow(prediction)
plt.colorbar()
plt.show()
return
'''
#Save trained model
unet_model.save(model_file)
print(f'Writing out {model_file}')
#Clear graphs
tf.keras.backend.clear_session()
#self.validate_UNET(model,validX,validY,threshold_file)
return
def train_CNN(self,member,input_data):
"""
Function to train a convolutional neural net (CNN) for random
training data and associated labels.
Args:
member (str): Ensemble member
trainX (tuple): Tuple of (train data, train labels,
validation data, validation labels)
"""
trainX,trainY,validX,validY = input_data
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
print('Validation data shape {0}'.format(np.shape(validX)))
print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'
print(model_file)
if not os.path.exists(model_file):
# Clear graphs
tf.keras.backend.clear_session()
#Initiliaze Convolutional Neural Net (CNN)
model = models.Sequential()
input_shape = np.shape(trainX[0])
#First layer: input shape (y,x,# variables)
#Add noise
model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))
for filters in [32,64,128]:
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.MaxPooling2D())
#Flatten the last convolutional layer
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.Dense(4,activation='softmax'))
#Compile neural net
model.compile(optimizer='adam',loss='categorical_crossentropy',
metrics=[tf.keras.metrics.AUC()])
print(model.summary())
#fit neural net
n_epochs = 10
bs = 256
#augment data
aug = imagedatagenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
train_generator = aug.flow(trainx,trainy,batch_size=bs)
conv_hist = model.fit(
train_generator,steps_per_epoch=len(trainx) // bs,
epochs=n_epochs,verbose=1,class_weight=self.class_percentages)
#save trained model
model.save(model_file)
print(f'Writing out {model_file}')
else:
model = tf.keras.models.load_model(model_file)
print(f'\nOpening {model_file}\n')
del trainY,trainX
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if os.path.exists(threshold_file):
del validX,validY
return
self.validate_CNN(model,validX,validY,threshold_file)
return
def validate_CNN(self,model,validX,validY,threshold_file):
print()
#Predict on validation data
cnn_preds = model.predict(validX)
sev_hail = cnn_preds[:,2]
sig_hail = cnn_preds[:,3]
#combine the severe hail and sig severe hail classes
sev_prob_preds = sev_hail+sig_hail
print('Max probability',np.nanmax(sev_prob_preds))
#classify labels as severe hail or no hail
true_preds = np.where(validY >= 2, 1, 0)
del validX, validY
df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold'])
#Find threshold with the highest validation AUC score
auc_score = []
thresholds = np.arange(0.1,1.01,0.02)
for t in thresholds:
threshold_preds = np.where(sev_prob_preds >= t,1,0)
auc_score.append(roc_auc_score(true_preds, threshold_preds))
print(auc_score)
#output threshold with highest AUC
df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
print(df_best_score)
df_best_score.to_csv(threshold_file)
print(f'Writing out {threshold_file}')
return
def predict_model(self,member,patch_map_conversion_indices,
total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#):
lon_grid,lat_grid):
"""
Function that opens a pre-trained convolutional neural net (cnn).
and predicts hail probability forecasts for a single ensemble member.
Args:
Right now only includes severe hail prediction, not sig-severe
"""
##################
# Load in any saved DL model files
##################
#Clear any saved DL graphs
tf.keras.backend.clear_session()
#Load DL model
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
DL_model = tf.keras.models.load_model(model_file,compile=False)
if self.model_type == 'CNN':
#Use minimum prob threshold chosen with validation data
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if not os.path.exists(threshold_file):
print('No thresholds found')
return
prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05
print(prob_thresh)
total_count = 0
##################
#Extract forecast data (#hours, #patches, nx, ny, #variables)
##################
forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None])
if forecast_data is None:
print('No forecast data found')
return
##################
# Standardize hourly data
##################
standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour])
for hour in np.arange(forecast_data.shape[0])])
del forecast_data
##################
# Produce gridded hourly hail forecast
##################
total_grid = np.empty( (standard_forecast_data.shape[0],
total_map_shape[0]*total_map_shape[1]) )*np.nan
for hour in np.arange(standard_forecast_data.shape[0]):
print(hour)
#Predict probability of severe hail
DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))
######
# Will need to fix CNN code to reflect the conversion inds are in
#patches x (patch_radius*patch_radius) instead of (patches*radius*radius)
#####
if self.model_type == 'CNN':
severe_proba_indices = np.where( (cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0]
severe_patches = np.zeros(subset_map_shape)
#If no hourly severe hail predicted, continue
if len(severe_proba_indices) <1 : continue
severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1)
total_grid[hour,map_conversion_inds] = severe_patches.ravel()
print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3])))
total_count += len(severe_proba_indices)
print('Total severe probs:',total_count)
print()
elif 'UNET' in self.model_type:
for patch in np.arange(standard_forecast_data.shape[1]):
patch_indices = patch_map_conversion_indices[patch]
#Gets rid of overlapping edges
overlap_pt = 4
# If unet3+ then the last output tensor is the correct one
if DL_prediction.ndim > 4:
hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
else:
hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
total_grid[hour,patch_indices] = hourly_patch_data
del DL_prediction
del standard_forecast_data
output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape)
date_outpath = forecast_grid_path + f'{date[0][:-5]}/'
#Output gridded forecasts
if not os.path.exists(date_outpath): os.makedirs(date_outpath)
gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'
print(f'Writing out {gridded_out_file}')
with h5py.File(gridded_out_file, 'w') as hf:
hf.create_dataset("data",data=output_data,
compression='gzip',compression_opts=6)
return
def dice_loss(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.math.sigmoid(y_pred)
numerator = 2 * tf.reduce_sum(y_true * y_pred)
denominator = tf.reduce_sum(y_true + y_pred)
return 1 - numerator / denominator
'''
From: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/
'''
def down_block(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
p = layers.MaxPooling2D((2,2))(c)
return c, p
def up_block(x, skip, filters, kernel_size=(3, 3)):
up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)
concat = layers.Concatenate()([up, skip])
c = layers.Conv2D(filters, kernel_size, padding='same')(concat)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
def bottleneck(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
| if "_" in predictor:
predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]
elif " " in predictor:
predictor_name = ''.join([v[0].upper() for v in predictor.split()])
else: predictor_name = predictor
long_predictors.append(predictor_name) | conditional_block |
DLModeler.py | from processing.DLDataEngineering import DLDataEngineering
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import numpy as np
import h5py
import os
from scipy.ndimage import gaussian_filter
#Deep learning packages
import tensorflow as tf
#from tensorflow import keras
from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply
from tensorflow.keras.backend import max
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow import keras
from sklearn.metrics import f1_score,roc_auc_score
import matplotlib.pyplot as plt
import cartopy.feature as cf
import cartopy.crs as ccrs
import cartopy
from keras_unet_collection import models, base, utils
class DLModeler(object):
def __init__(self,model_path,hf_path,num_examples,
class_percentages,predictors,model_args,
model_type):
self.model_path = model_path
self.hf_path = hf_path
self.num_examples = num_examples
self.class_percentages = class_percentages
self.model_args = model_args
self.model_type = model_type
long_predictors = []
#Shorten predictor names
for predictor in predictors:
if "_" in predictor:
predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]
elif " " in predictor:
predictor_name = ''.join([v[0].upper() for v in predictor.split()])
else: predictor_name = predictor
long_predictors.append(predictor_name)
self.predictors = np.array(long_predictors)
#Class to read data and standardize
self.dldataeng = DLDataEngineering(self.model_path,self.hf_path,
self.num_examples,self.class_percentages,self.predictors,
self.model_args)
return
def train_models(self,member,train_dates,valid_dates):
"""
Function that reads and extracts pre-processed 2d member data
from an ensemble to train a convolutional neural net (cnn) or
UNET.
The model data is standardized before being input to the cnn,
with the observation data in the shape (# examples, # classes).
Args:
member (str): ensemble member data that trains a DL model
"""
train_data, train_label = self.dldataeng.extract_training_data(member,
train_dates,self.model_type)
#valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)
valid_data, valid_label = [],[]
if self.model_type == 'CNN':
onehot_encoder = OneHotEncoder(sparse=False,categories='auto')
encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))
self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)
elif 'UNET' in self.model_type:
#train_label[train_label >= 50.] = 50.
#log_train_label = np.log((train_label+1.0))
self.train_UNET(member,train_data,train_label,valid_data,valid_label)
return
def train_UNET(self,member,trainX,trainY,validX,validY):
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
'''
if os.path.exists(model_file):
del trainX,trainY,validX,validY
unet = tf.keras.models.load_model(model_file,compile=False)
print(f'\nOpening {model_file}\n')
#self.validate_UNET(model,validX,validY,threshold_file)
return
'''
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
#print('Validation data shape {0}'.format(np.shape(validX)))
#print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1,
'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU',
'output_activation':'ReLU', 'batch_norm':False, 'pool':True,
'unpool':False, 'name':f'{self.model_type}'}
if self.model_type == 'UNET':
model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]
unet_model_obj = models.unet_2d
compile_params = {'loss': 'mean_squared_error'}
else:
compile_params = {'loss': ['mean_squared_error',
'mean_squared_error','mean_squared_error',
'mean_squared_error','mean_squared_error'],
'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}
if self.model_type == 'UNET2plus':
plus_model_params = {'filter_num':[16, 32, 64, 128, 256],
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_plus_2d
elif self.model_type == 'UNET3plus':
plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],
'filter_num_skip':'auto', 'filter_num_aggregate':'auto',
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_3plus_2d
try: unet_model = unet_model_obj(**model_obj_params)
except:
print(f"{self.model_type} Model type not found.")
return
unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))
print(unet_model.summary())
#Augment data
aug = ImageDataGenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
#Fit UNET
n_epochs = 15
bs = 256
conv_hist = unet_model.fit(
aug.flow(trainX,trainY,batch_size=bs),
steps_per_epoch=len(trainX)/bs,
epochs=n_epochs,verbose=1)
'''
pred_s = trainX[0].reshape(1,input_shape[0],
input_shape[1],input_shape[2])
prediction = unet.predict(pred_s)[0,:,:,:]
print(prediction.shape)
plt.imshow(prediction)
plt.colorbar()
plt.show()
return
'''
#Save trained model
unet_model.save(model_file)
print(f'Writing out {model_file}')
#Clear graphs
tf.keras.backend.clear_session()
#self.validate_UNET(model,validX,validY,threshold_file)
return
def train_CNN(self,member,input_data):
"""
Function to train a convolutional neural net (CNN) for random
training data and associated labels.
Args:
member (str): Ensemble member
trainX (tuple): Tuple of (train data, train labels,
validation data, validation labels)
"""
trainX,trainY,validX,validY = input_data
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
print('Validation data shape {0}'.format(np.shape(validX)))
print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'
print(model_file)
if not os.path.exists(model_file):
# Clear graphs
tf.keras.backend.clear_session()
#Initiliaze Convolutional Neural Net (CNN)
model = models.Sequential()
input_shape = np.shape(trainX[0])
#First layer: input shape (y,x,# variables)
#Add noise
model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))
for filters in [32,64,128]:
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.MaxPooling2D())
#Flatten the last convolutional layer
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.Dense(4,activation='softmax'))
#Compile neural net
model.compile(optimizer='adam',loss='categorical_crossentropy',
metrics=[tf.keras.metrics.AUC()])
print(model.summary())
#fit neural net
n_epochs = 10
bs = 256
#augment data
aug = imagedatagenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
train_generator = aug.flow(trainx,trainy,batch_size=bs)
conv_hist = model.fit(
train_generator,steps_per_epoch=len(trainx) // bs,
epochs=n_epochs,verbose=1,class_weight=self.class_percentages)
#save trained model
model.save(model_file)
print(f'Writing out {model_file}')
else:
model = tf.keras.models.load_model(model_file)
print(f'\nOpening {model_file}\n')
del trainY,trainX
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if os.path.exists(threshold_file):
del validX,validY
return
self.validate_CNN(model,validX,validY,threshold_file)
return
def validate_CNN(self,model,validX,validY,threshold_file):
print()
#Predict on validation data
cnn_preds = model.predict(validX)
sev_hail = cnn_preds[:,2]
sig_hail = cnn_preds[:,3]
#combine the severe hail and sig severe hail classes
sev_prob_preds = sev_hail+sig_hail
print('Max probability',np.nanmax(sev_prob_preds))
#classify labels as severe hail or no hail
true_preds = np.where(validY >= 2, 1, 0)
del validX, validY
df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold'])
#Find threshold with the highest validation AUC score
auc_score = []
thresholds = np.arange(0.1,1.01,0.02)
for t in thresholds:
threshold_preds = np.where(sev_prob_preds >= t,1,0)
auc_score.append(roc_auc_score(true_preds, threshold_preds))
print(auc_score)
#output threshold with highest AUC
df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
print(df_best_score)
df_best_score.to_csv(threshold_file)
print(f'Writing out {threshold_file}')
return
def predict_model(self,member,patch_map_conversion_indices,
total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#):
lon_grid,lat_grid):
"""
Function that opens a pre-trained convolutional neural net (cnn).
and predicts hail probability forecasts for a single ensemble member.
Args:
Right now only includes severe hail prediction, not sig-severe
"""
##################
# Load in any saved DL model files
##################
#Clear any saved DL graphs
tf.keras.backend.clear_session()
#Load DL model
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
DL_model = tf.keras.models.load_model(model_file,compile=False)
if self.model_type == 'CNN':
#Use minimum prob threshold chosen with validation data
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if not os.path.exists(threshold_file):
print('No thresholds found') | total_count = 0
##################
#Extract forecast data (#hours, #patches, nx, ny, #variables)
##################
forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None])
if forecast_data is None:
print('No forecast data found')
return
##################
# Standardize hourly data
##################
standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour])
for hour in np.arange(forecast_data.shape[0])])
del forecast_data
##################
# Produce gridded hourly hail forecast
##################
total_grid = np.empty( (standard_forecast_data.shape[0],
total_map_shape[0]*total_map_shape[1]) )*np.nan
for hour in np.arange(standard_forecast_data.shape[0]):
print(hour)
#Predict probability of severe hail
DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))
######
# Will need to fix CNN code to reflect the conversion inds are in
#patches x (patch_radius*patch_radius) instead of (patches*radius*radius)
#####
if self.model_type == 'CNN':
severe_proba_indices = np.where( (cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0]
severe_patches = np.zeros(subset_map_shape)
#If no hourly severe hail predicted, continue
if len(severe_proba_indices) <1 : continue
severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1)
total_grid[hour,map_conversion_inds] = severe_patches.ravel()
print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3])))
total_count += len(severe_proba_indices)
print('Total severe probs:',total_count)
print()
elif 'UNET' in self.model_type:
for patch in np.arange(standard_forecast_data.shape[1]):
patch_indices = patch_map_conversion_indices[patch]
#Gets rid of overlapping edges
overlap_pt = 4
# If unet3+ then the last output tensor is the correct one
if DL_prediction.ndim > 4:
hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
else:
hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt,
overlap_pt:-overlap_pt,0].ravel()
total_grid[hour,patch_indices] = hourly_patch_data
del DL_prediction
del standard_forecast_data
output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape)
date_outpath = forecast_grid_path + f'{date[0][:-5]}/'
#Output gridded forecasts
if not os.path.exists(date_outpath): os.makedirs(date_outpath)
gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'
print(f'Writing out {gridded_out_file}')
with h5py.File(gridded_out_file, 'w') as hf:
hf.create_dataset("data",data=output_data,
compression='gzip',compression_opts=6)
return
def dice_loss(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.math.sigmoid(y_pred)
numerator = 2 * tf.reduce_sum(y_true * y_pred)
denominator = tf.reduce_sum(y_true + y_pred)
return 1 - numerator / denominator
'''
From: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/
'''
def down_block(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
p = layers.MaxPooling2D((2,2))(c)
return c, p
def up_block(x, skip, filters, kernel_size=(3, 3)):
up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)
concat = layers.Concatenate()([up, skip])
c = layers.Conv2D(filters, kernel_size, padding='same')(concat)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
def bottleneck(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c | return
prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05
print(prob_thresh) | random_line_split |
nodes.ts | import { CachedTransform, IsNaN, emptyArray, ToJSON, AsObj } from 'js-vextensions';
import { GetData, SplitStringBySlash_Cached, SlicePath, GetDataAsync, CachedTransform_WithStore } from 'Utils/FrameworkOverrides';
import { PathSegmentToNodeID } from 'Store/main/mapViews';
import { GetNodeL2, GetNodeL3 } from './nodes/$node';
import { MapNode, MapNodeL2, MapNodeL3, globalRootNodeID } from './nodes/@MapNode';
import { MapNodeType, MapNodeType_Info } from './nodes/@MapNodeType';
import { IsUserCreatorOrMod, CanGetBasicPermissions, HasAdminPermissions } from './userExtras';
import { PermissionGroupSet } from './userExtras/@UserExtraInfo';
import { GetUserAccessLevel, MeID } from './users';
export enum HolderType {
Truth = 10,
Relevance = 20,
}
export type NodeMap = {[key: string]: MapNode};
export function GetNodeMap(): NodeMap {
return GetData('nodes');
}
export function GetNodes(): MapNode[] {
const nodeMap = GetNodeMap();
return CachedTransform('GetNodes', [], nodeMap, () => (nodeMap ? nodeMap.VValues(true) : []));
}
export function GetNodesL2(): MapNodeL2[] {
const nodes = GetNodes();
return CachedTransform('GetNodes', [], nodes, () => nodes.map(a => GetNodeL2(a)));
}
/* export function GetNodes_Enhanced(): MapNode[] {
let nodeMap = GetNodeMap();
return CachedTransform("GetNodes_Enhanced", [], nodeMap, ()=>nodeMap ? nodeMap.VValues(true) : []);
} */
export function GetNode(id: string) {
// Assert(id != null && !IsNaN(id), "Node-id cannot be null or NaN.");
if (id == null || IsNaN(id)) return null;
return GetData('nodes', id) as MapNode;
}
/* export async function GetNodeAsync(id: string) {
return await GetDataAsync("nodes", id) as MapNode;
} */
export function GetParentCount(node: MapNode) {
return (node.parents || {}).VKeys(true).length;
}
export function GetChildCount(node: MapNode) {
return (node.children || {}).VKeys(true).length;
}
export function IsRootNode(node: MapNode) {
if (IsNodeSubnode(node)) return false;
return node.type == MapNodeType.Category && GetParentCount(node) == 0;
}
export function IsNodeSubnode(node: MapNode) {
return node.layerPlusAnchorParents != null;
}
export function GetParentPath(childPath: string) {
return SplitStringBySlash_Cached(childPath).slice(0, -1).join('/');
}
export function GetParentNodeID(path: string) {
const pathNodes = SplitStringBySlash_Cached(path);
if (pathNodes.Last()[0] == '*') return null;
const parentNodeStr = pathNodes.XFromLast(1);
return parentNodeStr ? PathSegmentToNodeID(parentNodeStr) : null;
}
export function GetParentNode(childPath: string) {
return GetNode(GetParentNodeID(childPath));
}
export function GetParentNodeL2(childPath: string) {
return GetNodeL2(GetParentNodeID(childPath));
}
export function GetParentNodeL3(childPath: string) {
return GetNodeL3(GetParentPath(childPath));
}
export function GetNodeID(path: string) {
const ownNodeStr = SplitStringBySlash_Cached(path).LastOrX();
return ownNodeStr ? PathSegmentToNodeID(ownNodeStr) : null;
}
export function GetNodeParents(node: MapNode) {
const parents = (node.parents || {}).VKeys(true).map(id => GetNode(id));
return CachedTransform('GetNodeParents', [node._key], parents, () => parents);
}
export async function GetNodeParentsAsync(node: MapNode) {
return await Promise.all(node.parents.VKeys(true).map(parentID => GetDataAsync('nodes', parentID))) as MapNode[];
}
export function GetNodeParentsL2(node: MapNode) {
const parentsL2 = GetNodeParents(node).map(parent => (parent ? GetNodeL2(parent) : null));
return CachedTransform('GetNodeParentsL2', [], parentsL2, () => parentsL2);
}
export function GetNodeParentsL3(node: MapNode, path: string) {
const parentsL3 = GetNodeParents(node).map(parent => (parent ? GetNodeL3(SlicePath(path, 1)) : null));
return CachedTransform('GetNodeParentsL3', [path], parentsL3, () => parentsL3);
}
/* export function GetNodeChildIDs(nodeID: string) {
let node = GetNode(nodeID);
// any time the childIDs changes, we know the node object changes as well; so just cache childIDs on node
if (node["@childIDs"] == null)
node.VSet("@childIDs", (node.children || {}).VKeys(true).map(id=>parseInt(id)), {prop: {}});
return node["@childIDs"];
} */
export function GetNodeChildren(node: MapNode) {
// special case, for demo map
if (node.children && node.children[0] instanceof MapNode) {
return node.children as any as MapNode[];
}
const children = (node.children || {}).VKeys(true).map(id => GetNode(id));
return CachedTransform('GetNodeChildren', [node._key], children, () => children);
}
export async function GetNodeChildrenAsync(node: MapNode) {
return await Promise.all(node.children.VKeys(true).map(id => GetDataAsync('nodes', id))) as MapNode[];
}
export function GetNodeChildrenL2(node: MapNode) {
const nodeChildren = GetNodeChildren(node);
const nodeChildrenL2 = nodeChildren.map(child => (child ? GetNodeL2(child) : null));
return CachedTransform('GetNodeChildrenL2', [], nodeChildrenL2, () => nodeChildrenL2);
}
export function GetNodeChildrenL3(node: MapNode, path?: string, filterForPath = false): MapNodeL3[] {
if (node == null) return emptyArray;
return CachedTransform_WithStore('GetNodeChildrenL3', [node._key, path, filterForPath], node.children, () => {
path = path || `${node._key}`;
const nodeChildrenL2 = GetNodeChildrenL2(node);
let nodeChildrenL3 = nodeChildrenL2.map(child => (child ? GetNodeL3(`${path}/${child._key}`) : null));
if (filterForPath) {
nodeChildrenL3 = nodeChildrenL3.filter((child) => {
// if null, keep (so receiver knows there's an entry here, but it's still loading)
if (child == null) return true;
// filter out any nodes whose access-level is higher than our own
if (child.current.accessLevel > GetUserAccessLevel(MeID())) return false;
// hide nodes that don't have the required premise-count
// if (!IsNodeVisibleToNonModNonCreators(child, GetNodeChildren(child)) && !IsUserCreatorOrMod(MeID(), child)) return false;
return true;
});
}
return nodeChildrenL3;
});
}
export function GetHolderType(childType: MapNodeType, parentType: MapNodeType) {
if (parentType == MapNodeType.Argument) {
if (childType == MapNodeType.Argument) return HolderType.Relevance;
} else if (parentType == MapNodeType.Claim) {
if (childType == MapNodeType.Argument) return HolderType.Truth;
}
return null;
}
export function ForLink_GetError(parentType: MapNodeType, childType: MapNodeType) {
const parentTypeInfo = MapNodeType_Info.for[parentType].childTypes;
if (!parentTypeInfo.Contains(childType)) return `The child's type (${MapNodeType[childType]}) is not valid for the parent's type (${MapNodeType[parentType]}).`;
}
export function ForNewLink_GetError(parentID: string, newChild: Pick<MapNode, '_key' | 'type'>, permissions: PermissionGroupSet, newHolderType?: HolderType) {
if (!CanGetBasicPermissions(permissions)) return "You're not signed in, or lack basic permissions.";
const parent = GetNode(parentID);
if (parent == null) return 'Parent data not found.';
// const parentPathIDs = SplitStringBySlash_Cached(parentPath).map(a => a.ToInt());
// if (map.name == "Global" && parentPathIDs.length == 1) return false; // if parent is l1(root), don't accept new children
if (parent._key == globalRootNodeID && !HasAdminPermissions(permissions)) return 'Only admins can add children to the global-root.';
// if in global map, parent is l2, and user is not a mod (and not node creator), don't accept new children
// if (parentPathIDs[0] == globalRootNodeID && parentPathIDs.length == 2 && !HasModPermissions(permissions) && parent.creator != MeID()) return false;
if (parent._key == newChild._key) return 'Cannot link node as its own child.';
const isAlreadyChild = (parent.children || {}).VKeys(true).Contains(`${newChild._key}`);
// if new-holder-type is not specified, consider "any" and so don't check
if (newHolderType !== undefined) {
const currentHolderType = GetHolderType(newChild.type, parent.type);
if (isAlreadyChild && currentHolderType == newHolderType) return false; // if already a child of this parent, reject (unless it's a claim, in which case allow, as can be)
}
return ForLink_GetError(parent.type, newChild.type);
}
export function ForUnlink_GetError(userID: string, node: MapNodeL2, asPartOfCut = false) {
const baseText = `Cannot unlink node #${node._key}, since `;
if (!IsUserCreatorOrMod(userID, node)) return `${baseText}you are not its owner. (or a mod)`; | }
export function ForDelete_GetError(userID: string, node: MapNodeL2, subcommandInfo?: {asPartOfMapDelete?: boolean, childrenToIgnore?: string[]}) {
const baseText = `Cannot delete node #${node._key}, since `;
if (!IsUserCreatorOrMod(userID, node)) return `${baseText}you are not the owner of this node. (or a mod)`;
if (GetParentCount(node) > 1) return `${baseText}it has more than one parent. Try unlinking it instead.`;
if (IsRootNode(node) && !AsObj(subcommandInfo).asPartOfMapDelete) return `${baseText}it's the root-node of a map.`;
const nodeChildren = GetNodeChildrenL2(node);
if (nodeChildren.Any(a => a == null)) return '[still loading children...]';
if (nodeChildren.map(a => a._key).Except(AsObj(subcommandInfo).childrenToIgnore || []).length) {
return `Cannot delete this node (#${node._key}) until all its children have been unlinked or deleted.`;
}
return null;
}
export function ForCut_GetError(userID: string, node: MapNodeL2) {
return ForUnlink_GetError(userID, node, true);
}
export function ForCopy_GetError(userID: string, node: MapNode) {
if (!CanGetBasicPermissions(userID)) return "You're not signed in, or lack basic permissions.";
if (IsRootNode(node)) return 'Cannot copy the root-node of a map.';
if (IsNodeSubnode(node)) return 'Cannot copy a subnode.';
return null;
}
/* export function GetUnlinkErrorMessage(parent: MapNode, child: MapNode) {
//let childNodes = node.children.Select(a=>nodes[a]);
let parentNodes = nodes.filter(a=>a.children && a.children[node._id]);
if (parentNodes.length <= 1)
} */ | if (!asPartOfCut && (node.parents || {}).VKeys(true).length <= 1) return `${baseText}doing so would orphan it. Try deleting it instead.`;
if (IsRootNode(node)) return `${baseText}it's the root-node of a map.`;
if (IsNodeSubnode(node)) return `${baseText}it's a subnode. Try deleting it instead.`;
return null; | random_line_split |
nodes.ts | import { CachedTransform, IsNaN, emptyArray, ToJSON, AsObj } from 'js-vextensions';
import { GetData, SplitStringBySlash_Cached, SlicePath, GetDataAsync, CachedTransform_WithStore } from 'Utils/FrameworkOverrides';
import { PathSegmentToNodeID } from 'Store/main/mapViews';
import { GetNodeL2, GetNodeL3 } from './nodes/$node';
import { MapNode, MapNodeL2, MapNodeL3, globalRootNodeID } from './nodes/@MapNode';
import { MapNodeType, MapNodeType_Info } from './nodes/@MapNodeType';
import { IsUserCreatorOrMod, CanGetBasicPermissions, HasAdminPermissions } from './userExtras';
import { PermissionGroupSet } from './userExtras/@UserExtraInfo';
import { GetUserAccessLevel, MeID } from './users';
export enum HolderType {
Truth = 10,
Relevance = 20,
}
export type NodeMap = {[key: string]: MapNode};
export function GetNodeMap(): NodeMap {
return GetData('nodes');
}
export function GetNodes(): MapNode[] {
const nodeMap = GetNodeMap();
return CachedTransform('GetNodes', [], nodeMap, () => (nodeMap ? nodeMap.VValues(true) : []));
}
export function GetNodesL2(): MapNodeL2[] {
const nodes = GetNodes();
return CachedTransform('GetNodes', [], nodes, () => nodes.map(a => GetNodeL2(a)));
}
/* export function GetNodes_Enhanced(): MapNode[] {
let nodeMap = GetNodeMap();
return CachedTransform("GetNodes_Enhanced", [], nodeMap, ()=>nodeMap ? nodeMap.VValues(true) : []);
} */
export function GetNode(id: string) {
// Assert(id != null && !IsNaN(id), "Node-id cannot be null or NaN.");
if (id == null || IsNaN(id)) return null;
return GetData('nodes', id) as MapNode;
}
/* export async function GetNodeAsync(id: string) {
return await GetDataAsync("nodes", id) as MapNode;
} */
export function GetParentCount(node: MapNode) {
return (node.parents || {}).VKeys(true).length;
}
export function GetChildCount(node: MapNode) {
return (node.children || {}).VKeys(true).length;
}
export function IsRootNode(node: MapNode) {
if (IsNodeSubnode(node)) return false;
return node.type == MapNodeType.Category && GetParentCount(node) == 0;
}
export function IsNodeSubnode(node: MapNode) {
return node.layerPlusAnchorParents != null;
}
export function GetParentPath(childPath: string) {
return SplitStringBySlash_Cached(childPath).slice(0, -1).join('/');
}
export function GetParentNodeID(path: string) {
const pathNodes = SplitStringBySlash_Cached(path);
if (pathNodes.Last()[0] == '*') return null;
const parentNodeStr = pathNodes.XFromLast(1);
return parentNodeStr ? PathSegmentToNodeID(parentNodeStr) : null;
}
export function GetParentNode(childPath: string) {
return GetNode(GetParentNodeID(childPath));
}
export function GetParentNodeL2(childPath: string) {
return GetNodeL2(GetParentNodeID(childPath));
}
export function GetParentNodeL3(childPath: string) {
return GetNodeL3(GetParentPath(childPath));
}
export function GetNodeID(path: string) {
const ownNodeStr = SplitStringBySlash_Cached(path).LastOrX();
return ownNodeStr ? PathSegmentToNodeID(ownNodeStr) : null;
}
export function GetNodeParents(node: MapNode) {
const parents = (node.parents || {}).VKeys(true).map(id => GetNode(id));
return CachedTransform('GetNodeParents', [node._key], parents, () => parents);
}
export async function GetNodeParentsAsync(node: MapNode) {
return await Promise.all(node.parents.VKeys(true).map(parentID => GetDataAsync('nodes', parentID))) as MapNode[];
}
export function GetNodeParentsL2(node: MapNode) {
const parentsL2 = GetNodeParents(node).map(parent => (parent ? GetNodeL2(parent) : null));
return CachedTransform('GetNodeParentsL2', [], parentsL2, () => parentsL2);
}
export function GetNodeParentsL3(node: MapNode, path: string) {
const parentsL3 = GetNodeParents(node).map(parent => (parent ? GetNodeL3(SlicePath(path, 1)) : null));
return CachedTransform('GetNodeParentsL3', [path], parentsL3, () => parentsL3);
}
/* export function GetNodeChildIDs(nodeID: string) {
let node = GetNode(nodeID);
// any time the childIDs changes, we know the node object changes as well; so just cache childIDs on node
if (node["@childIDs"] == null)
node.VSet("@childIDs", (node.children || {}).VKeys(true).map(id=>parseInt(id)), {prop: {}});
return node["@childIDs"];
} */
export function GetNodeChildren(node: MapNode) {
// special case, for demo map
if (node.children && node.children[0] instanceof MapNode) {
return node.children as any as MapNode[];
}
const children = (node.children || {}).VKeys(true).map(id => GetNode(id));
return CachedTransform('GetNodeChildren', [node._key], children, () => children);
}
export async function GetNodeChildrenAsync(node: MapNode) {
return await Promise.all(node.children.VKeys(true).map(id => GetDataAsync('nodes', id))) as MapNode[];
}
export function GetNodeChildrenL2(node: MapNode) {
const nodeChildren = GetNodeChildren(node);
const nodeChildrenL2 = nodeChildren.map(child => (child ? GetNodeL2(child) : null));
return CachedTransform('GetNodeChildrenL2', [], nodeChildrenL2, () => nodeChildrenL2);
}
export function GetNodeChildrenL3(node: MapNode, path?: string, filterForPath = false): MapNodeL3[] {
if (node == null) return emptyArray;
return CachedTransform_WithStore('GetNodeChildrenL3', [node._key, path, filterForPath], node.children, () => {
path = path || `${node._key}`;
const nodeChildrenL2 = GetNodeChildrenL2(node);
let nodeChildrenL3 = nodeChildrenL2.map(child => (child ? GetNodeL3(`${path}/${child._key}`) : null));
if (filterForPath) {
nodeChildrenL3 = nodeChildrenL3.filter((child) => {
// if null, keep (so receiver knows there's an entry here, but it's still loading)
if (child == null) return true;
// filter out any nodes whose access-level is higher than our own
if (child.current.accessLevel > GetUserAccessLevel(MeID())) return false;
// hide nodes that don't have the required premise-count
// if (!IsNodeVisibleToNonModNonCreators(child, GetNodeChildren(child)) && !IsUserCreatorOrMod(MeID(), child)) return false;
return true;
});
}
return nodeChildrenL3;
});
}
export function GetHolderType(childType: MapNodeType, parentType: MapNodeType) {
if (parentType == MapNodeType.Argument) {
if (childType == MapNodeType.Argument) return HolderType.Relevance;
} else if (parentType == MapNodeType.Claim) {
if (childType == MapNodeType.Argument) return HolderType.Truth;
}
return null;
}
export function ForLink_GetError(parentType: MapNodeType, childType: MapNodeType) {
const parentTypeInfo = MapNodeType_Info.for[parentType].childTypes;
if (!parentTypeInfo.Contains(childType)) return `The child's type (${MapNodeType[childType]}) is not valid for the parent's type (${MapNodeType[parentType]}).`;
}
export function | (parentID: string, newChild: Pick<MapNode, '_key' | 'type'>, permissions: PermissionGroupSet, newHolderType?: HolderType) {
if (!CanGetBasicPermissions(permissions)) return "You're not signed in, or lack basic permissions.";
const parent = GetNode(parentID);
if (parent == null) return 'Parent data not found.';
// const parentPathIDs = SplitStringBySlash_Cached(parentPath).map(a => a.ToInt());
// if (map.name == "Global" && parentPathIDs.length == 1) return false; // if parent is l1(root), don't accept new children
if (parent._key == globalRootNodeID && !HasAdminPermissions(permissions)) return 'Only admins can add children to the global-root.';
// if in global map, parent is l2, and user is not a mod (and not node creator), don't accept new children
// if (parentPathIDs[0] == globalRootNodeID && parentPathIDs.length == 2 && !HasModPermissions(permissions) && parent.creator != MeID()) return false;
if (parent._key == newChild._key) return 'Cannot link node as its own child.';
const isAlreadyChild = (parent.children || {}).VKeys(true).Contains(`${newChild._key}`);
// if new-holder-type is not specified, consider "any" and so don't check
if (newHolderType !== undefined) {
const currentHolderType = GetHolderType(newChild.type, parent.type);
if (isAlreadyChild && currentHolderType == newHolderType) return false; // if already a child of this parent, reject (unless it's a claim, in which case allow, as can be)
}
return ForLink_GetError(parent.type, newChild.type);
}
export function ForUnlink_GetError(userID: string, node: MapNodeL2, asPartOfCut = false) {
const baseText = `Cannot unlink node #${node._key}, since `;
if (!IsUserCreatorOrMod(userID, node)) return `${baseText}you are not its owner. (or a mod)`;
if (!asPartOfCut && (node.parents || {}).VKeys(true).length <= 1) return `${baseText}doing so would orphan it. Try deleting it instead.`;
if (IsRootNode(node)) return `${baseText}it's the root-node of a map.`;
if (IsNodeSubnode(node)) return `${baseText}it's a subnode. Try deleting it instead.`;
return null;
}
export function ForDelete_GetError(userID: string, node: MapNodeL2, subcommandInfo?: {asPartOfMapDelete?: boolean, childrenToIgnore?: string[]}) {
const baseText = `Cannot delete node #${node._key}, since `;
if (!IsUserCreatorOrMod(userID, node)) return `${baseText}you are not the owner of this node. (or a mod)`;
if (GetParentCount(node) > 1) return `${baseText}it has more than one parent. Try unlinking it instead.`;
if (IsRootNode(node) && !AsObj(subcommandInfo).asPartOfMapDelete) return `${baseText}it's the root-node of a map.`;
const nodeChildren = GetNodeChildrenL2(node);
if (nodeChildren.Any(a => a == null)) return '[still loading children...]';
if (nodeChildren.map(a => a._key).Except(AsObj(subcommandInfo).childrenToIgnore || []).length) {
return `Cannot delete this node (#${node._key}) until all its children have been unlinked or deleted.`;
}
return null;
}
export function ForCut_GetError(userID: string, node: MapNodeL2) {
return ForUnlink_GetError(userID, node, true);
}
export function ForCopy_GetError(userID: string, node: MapNode) {
if (!CanGetBasicPermissions(userID)) return "You're not signed in, or lack basic permissions.";
if (IsRootNode(node)) return 'Cannot copy the root-node of a map.';
if (IsNodeSubnode(node)) return 'Cannot copy a subnode.';
return null;
}
/* export function GetUnlinkErrorMessage(parent: MapNode, child: MapNode) {
//let childNodes = node.children.Select(a=>nodes[a]);
let parentNodes = nodes.filter(a=>a.children && a.children[node._id]);
if (parentNodes.length <= 1)
} */
| ForNewLink_GetError | identifier_name |
nodes.ts | import { CachedTransform, IsNaN, emptyArray, ToJSON, AsObj } from 'js-vextensions';
import { GetData, SplitStringBySlash_Cached, SlicePath, GetDataAsync, CachedTransform_WithStore } from 'Utils/FrameworkOverrides';
import { PathSegmentToNodeID } from 'Store/main/mapViews';
import { GetNodeL2, GetNodeL3 } from './nodes/$node';
import { MapNode, MapNodeL2, MapNodeL3, globalRootNodeID } from './nodes/@MapNode';
import { MapNodeType, MapNodeType_Info } from './nodes/@MapNodeType';
import { IsUserCreatorOrMod, CanGetBasicPermissions, HasAdminPermissions } from './userExtras';
import { PermissionGroupSet } from './userExtras/@UserExtraInfo';
import { GetUserAccessLevel, MeID } from './users';
export enum HolderType {
Truth = 10,
Relevance = 20,
}
export type NodeMap = {[key: string]: MapNode};
export function GetNodeMap(): NodeMap |
export function GetNodes(): MapNode[] {
const nodeMap = GetNodeMap();
return CachedTransform('GetNodes', [], nodeMap, () => (nodeMap ? nodeMap.VValues(true) : []));
}
export function GetNodesL2(): MapNodeL2[] {
const nodes = GetNodes();
return CachedTransform('GetNodes', [], nodes, () => nodes.map(a => GetNodeL2(a)));
}
/* export function GetNodes_Enhanced(): MapNode[] {
let nodeMap = GetNodeMap();
return CachedTransform("GetNodes_Enhanced", [], nodeMap, ()=>nodeMap ? nodeMap.VValues(true) : []);
} */
export function GetNode(id: string) {
// Assert(id != null && !IsNaN(id), "Node-id cannot be null or NaN.");
if (id == null || IsNaN(id)) return null;
return GetData('nodes', id) as MapNode;
}
/* export async function GetNodeAsync(id: string) {
return await GetDataAsync("nodes", id) as MapNode;
} */
export function GetParentCount(node: MapNode) {
return (node.parents || {}).VKeys(true).length;
}
export function GetChildCount(node: MapNode) {
return (node.children || {}).VKeys(true).length;
}
export function IsRootNode(node: MapNode) {
if (IsNodeSubnode(node)) return false;
return node.type == MapNodeType.Category && GetParentCount(node) == 0;
}
export function IsNodeSubnode(node: MapNode) {
return node.layerPlusAnchorParents != null;
}
export function GetParentPath(childPath: string) {
return SplitStringBySlash_Cached(childPath).slice(0, -1).join('/');
}
export function GetParentNodeID(path: string) {
const pathNodes = SplitStringBySlash_Cached(path);
if (pathNodes.Last()[0] == '*') return null;
const parentNodeStr = pathNodes.XFromLast(1);
return parentNodeStr ? PathSegmentToNodeID(parentNodeStr) : null;
}
export function GetParentNode(childPath: string) {
return GetNode(GetParentNodeID(childPath));
}
export function GetParentNodeL2(childPath: string) {
return GetNodeL2(GetParentNodeID(childPath));
}
export function GetParentNodeL3(childPath: string) {
return GetNodeL3(GetParentPath(childPath));
}
export function GetNodeID(path: string) {
const ownNodeStr = SplitStringBySlash_Cached(path).LastOrX();
return ownNodeStr ? PathSegmentToNodeID(ownNodeStr) : null;
}
export function GetNodeParents(node: MapNode) {
const parents = (node.parents || {}).VKeys(true).map(id => GetNode(id));
return CachedTransform('GetNodeParents', [node._key], parents, () => parents);
}
export async function GetNodeParentsAsync(node: MapNode) {
return await Promise.all(node.parents.VKeys(true).map(parentID => GetDataAsync('nodes', parentID))) as MapNode[];
}
export function GetNodeParentsL2(node: MapNode) {
const parentsL2 = GetNodeParents(node).map(parent => (parent ? GetNodeL2(parent) : null));
return CachedTransform('GetNodeParentsL2', [], parentsL2, () => parentsL2);
}
export function GetNodeParentsL3(node: MapNode, path: string) {
const parentsL3 = GetNodeParents(node).map(parent => (parent ? GetNodeL3(SlicePath(path, 1)) : null));
return CachedTransform('GetNodeParentsL3', [path], parentsL3, () => parentsL3);
}
/* export function GetNodeChildIDs(nodeID: string) {
let node = GetNode(nodeID);
// any time the childIDs changes, we know the node object changes as well; so just cache childIDs on node
if (node["@childIDs"] == null)
node.VSet("@childIDs", (node.children || {}).VKeys(true).map(id=>parseInt(id)), {prop: {}});
return node["@childIDs"];
} */
export function GetNodeChildren(node: MapNode) {
// special case, for demo map
if (node.children && node.children[0] instanceof MapNode) {
return node.children as any as MapNode[];
}
const children = (node.children || {}).VKeys(true).map(id => GetNode(id));
return CachedTransform('GetNodeChildren', [node._key], children, () => children);
}
export async function GetNodeChildrenAsync(node: MapNode) {
return await Promise.all(node.children.VKeys(true).map(id => GetDataAsync('nodes', id))) as MapNode[];
}
export function GetNodeChildrenL2(node: MapNode) {
const nodeChildren = GetNodeChildren(node);
const nodeChildrenL2 = nodeChildren.map(child => (child ? GetNodeL2(child) : null));
return CachedTransform('GetNodeChildrenL2', [], nodeChildrenL2, () => nodeChildrenL2);
}
export function GetNodeChildrenL3(node: MapNode, path?: string, filterForPath = false): MapNodeL3[] {
if (node == null) return emptyArray;
return CachedTransform_WithStore('GetNodeChildrenL3', [node._key, path, filterForPath], node.children, () => {
path = path || `${node._key}`;
const nodeChildrenL2 = GetNodeChildrenL2(node);
let nodeChildrenL3 = nodeChildrenL2.map(child => (child ? GetNodeL3(`${path}/${child._key}`) : null));
if (filterForPath) {
nodeChildrenL3 = nodeChildrenL3.filter((child) => {
// if null, keep (so receiver knows there's an entry here, but it's still loading)
if (child == null) return true;
// filter out any nodes whose access-level is higher than our own
if (child.current.accessLevel > GetUserAccessLevel(MeID())) return false;
// hide nodes that don't have the required premise-count
// if (!IsNodeVisibleToNonModNonCreators(child, GetNodeChildren(child)) && !IsUserCreatorOrMod(MeID(), child)) return false;
return true;
});
}
return nodeChildrenL3;
});
}
export function GetHolderType(childType: MapNodeType, parentType: MapNodeType) {
if (parentType == MapNodeType.Argument) {
if (childType == MapNodeType.Argument) return HolderType.Relevance;
} else if (parentType == MapNodeType.Claim) {
if (childType == MapNodeType.Argument) return HolderType.Truth;
}
return null;
}
export function ForLink_GetError(parentType: MapNodeType, childType: MapNodeType) {
const parentTypeInfo = MapNodeType_Info.for[parentType].childTypes;
if (!parentTypeInfo.Contains(childType)) return `The child's type (${MapNodeType[childType]}) is not valid for the parent's type (${MapNodeType[parentType]}).`;
}
export function ForNewLink_GetError(parentID: string, newChild: Pick<MapNode, '_key' | 'type'>, permissions: PermissionGroupSet, newHolderType?: HolderType) {
if (!CanGetBasicPermissions(permissions)) return "You're not signed in, or lack basic permissions.";
const parent = GetNode(parentID);
if (parent == null) return 'Parent data not found.';
// const parentPathIDs = SplitStringBySlash_Cached(parentPath).map(a => a.ToInt());
// if (map.name == "Global" && parentPathIDs.length == 1) return false; // if parent is l1(root), don't accept new children
if (parent._key == globalRootNodeID && !HasAdminPermissions(permissions)) return 'Only admins can add children to the global-root.';
// if in global map, parent is l2, and user is not a mod (and not node creator), don't accept new children
// if (parentPathIDs[0] == globalRootNodeID && parentPathIDs.length == 2 && !HasModPermissions(permissions) && parent.creator != MeID()) return false;
if (parent._key == newChild._key) return 'Cannot link node as its own child.';
const isAlreadyChild = (parent.children || {}).VKeys(true).Contains(`${newChild._key}`);
// if new-holder-type is not specified, consider "any" and so don't check
if (newHolderType !== undefined) {
const currentHolderType = GetHolderType(newChild.type, parent.type);
if (isAlreadyChild && currentHolderType == newHolderType) return false; // if already a child of this parent, reject (unless it's a claim, in which case allow, as can be)
}
return ForLink_GetError(parent.type, newChild.type);
}
export function ForUnlink_GetError(userID: string, node: MapNodeL2, asPartOfCut = false) {
const baseText = `Cannot unlink node #${node._key}, since `;
if (!IsUserCreatorOrMod(userID, node)) return `${baseText}you are not its owner. (or a mod)`;
if (!asPartOfCut && (node.parents || {}).VKeys(true).length <= 1) return `${baseText}doing so would orphan it. Try deleting it instead.`;
if (IsRootNode(node)) return `${baseText}it's the root-node of a map.`;
if (IsNodeSubnode(node)) return `${baseText}it's a subnode. Try deleting it instead.`;
return null;
}
export function ForDelete_GetError(userID: string, node: MapNodeL2, subcommandInfo?: {asPartOfMapDelete?: boolean, childrenToIgnore?: string[]}) {
const baseText = `Cannot delete node #${node._key}, since `;
if (!IsUserCreatorOrMod(userID, node)) return `${baseText}you are not the owner of this node. (or a mod)`;
if (GetParentCount(node) > 1) return `${baseText}it has more than one parent. Try unlinking it instead.`;
if (IsRootNode(node) && !AsObj(subcommandInfo).asPartOfMapDelete) return `${baseText}it's the root-node of a map.`;
const nodeChildren = GetNodeChildrenL2(node);
if (nodeChildren.Any(a => a == null)) return '[still loading children...]';
if (nodeChildren.map(a => a._key).Except(AsObj(subcommandInfo).childrenToIgnore || []).length) {
return `Cannot delete this node (#${node._key}) until all its children have been unlinked or deleted.`;
}
return null;
}
export function ForCut_GetError(userID: string, node: MapNodeL2) {
return ForUnlink_GetError(userID, node, true);
}
export function ForCopy_GetError(userID: string, node: MapNode) {
if (!CanGetBasicPermissions(userID)) return "You're not signed in, or lack basic permissions.";
if (IsRootNode(node)) return 'Cannot copy the root-node of a map.';
if (IsNodeSubnode(node)) return 'Cannot copy a subnode.';
return null;
}
/* export function GetUnlinkErrorMessage(parent: MapNode, child: MapNode) {
//let childNodes = node.children.Select(a=>nodes[a]);
let parentNodes = nodes.filter(a=>a.children && a.children[node._id]);
if (parentNodes.length <= 1)
} */
| {
return GetData('nodes');
} | identifier_body |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` a import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn | (
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boiler plate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
}
| store_stack_params | identifier_name |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` a import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable { | location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boiler plate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
} | data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(), | random_line_split |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` a import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boiler plate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else |
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
}
| {
// void function, return nothing
builder.ins().return_(&[]);
} | conditional_block |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` a import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> |
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boiler plate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
}
| {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
} | identifier_body |
main.py | """An endpoint to run the speed benchmarks from."""
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib.pyplot as plt
import os
import argparse
import itertools
from contexttimer import Timer
from csindexer import indexer as csindexer
# Use absolute paths to avoid any issues.
project_dir = os.path.dirname(os.path.realpath(__file__))
# Create argument parser.
parser = argparse.ArgumentParser(description="Endpoint for running tests on"
" the compressed sparse indexer.")
parser.add_argument('dependent',
type=str,
default='rows',
help="The varaible to use on the x-axis when plotting"
" against time.")
parser.add_argument('--sort',
type=int,
nargs='+',
default=[0],
help="Assume the indexer is sorted (1) or not (0).")
parser.add_argument('--n-threads',
type=int,
nargs='+',
default=[1],
help="Total threads to use. Set as -1 to use maximum.")
parser.add_argument('--sparse-format',
type=str,
nargs='+',
default=['CSR'],
help="Whether to use CSR or CSC storage format.")
parser.add_argument('--n',
type=int,
nargs='+',
default=[],
help="Total rows and columns of the sparse matrix, forcing"
" it to be square. This can be useful if we want to"
" change both rows and columns on the x-axis. Leave"
" as [] to ignore.")
parser.add_argument('--rows',
type=int,
nargs='+',
default=[100],
help="Total rows of the sparse matrix.")
parser.add_argument('--cols',
type=int,
nargs='+',
default=[100],
help="Total columns of the sparse matrix.")
parser.add_argument('--nnz',
type=int,
nargs='+',
default=[100],
help="Total non-zero values in sparse matrix.")
parser.add_argument('--n-indexers',
type=int,
nargs='+',
default=[100],
help="Total number of points in the indexer.")
parser.add_argument('--search-type',
type=str,
nargs='+',
default=['binary'],
help="Whether to use binary, interpolation or joint"
" search, or the scipy indexer.")
parser.add_argument('--operation',
type=str,
nargs='+',
default=['get'],
help="Whether to use a get or add operation.")
parser.add_argument('--save',
action='store_true',
help="Whether to save the plot to ./figures.")
parser.add_argument('--figure-name',
type=str,
default='my_figure.png',
help="What to call the plot.")
parser.add_argument('--debug',
action='store_true',
help="Print the configuration when creating model.")
parser.add_argument('--random-seed',
type=int,
default=np.random.randint(0, 2**32 - 1),
help="Value of random seed")
FLAGS, unparsed = parser.parse_known_args()
np.random.seed(FLAGS.random_seed)
config = FLAGS.__dict__.copy()
def index_time(sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation, debug):
|
if __name__ == "__main__":
# Dependent variable gets plotted on x-axis, all others are separate lines
# on the plot.
# Get the list of separate models to be plotted.
if config['n'] != []:
# Override the rows and cols using n (so the sparse matrix is square).
variables = ['sort', 'n_threads', 'sparse_format', 'n',
'nnz', 'n_indexers', 'search_type', 'operation']
else:
variables = ['sort', 'n_threads', 'sparse_format', 'rows', 'cols',
'nnz', 'n_indexers', 'search_type', 'operation']
dependent = config['dependent']
variables.remove(dependent)
models = list(itertools.product(*[config[i] for i in variables]))
# Convert models into dictionaries.
models = [dict(zip(variables, i)) for i in models]
# Now loop over the dependent variable and get timings for each model.
times = np.empty([len(config[dependent]), len(models)])
for i, x in enumerate(config[dependent]):
for j, model in enumerate(models):
m = model.copy()
m[dependent] = x
if 'n' in m:
m['rows'] = m['n']
m['cols'] = m['n']
times[i, j], _ = index_time(m['sort'], m['n_threads'],
m['sparse_format'], m['rows'],
m['cols'], m['nnz'], m['n_indexers'],
m['search_type'], m['operation'],
config['debug'])
# Finally plot each model.
## Get the maximum time seen.
max_time = times.max()
plt.figure(figsize=(20,20))
plt.ylim(0, max_time)
## Plot each model.
for j, model in enumerate(models):
plt.plot(config[dependent], times[:, j])
plt.xlabel(dependent)
## For the legend only use variables that have changed i.e. more than 1
## input.
used_variables = [i for i in variables if len(config[i]) != 1]
unused_variables = [i for i in variables if len(config[i]) == 1]
models_legend = [dict(zip(used_variables,
[model[i] for i in used_variables]))
for model in models]
plt.legend(models_legend)
models_title = dict(zip(unused_variables,
[config[i] for i in unused_variables]))
title = "Sparse indexing %s vs time (%s)" % (dependent, models_title)
plt.title(title)
if config['save']:
fname = project_dir + '/figures/' + config['figure_name']
plt.savefig(fname)
plt.show()
| """A function for timing our cxindexer and scipy indexer. It first creates
sparse matrices, sorts if necessary, runs indexers on both and returns
the times."""
if debug:
print("Benchmarking:\n\tSORT = %s\n\tN_THREADS = %s\n\tSPARSE_FORMAT ="
" %s\n\tROWS = %s\n\tCOLS = %s\n\tNNZ = %s\n\tN_INDEXERS ="
" %s\n\t" "SEARCH_TYPE = %s\n\tOPERATION = %s"
% (sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation))
# Generate matrix.
with Timer() as t:
M = sp.sparse.rand(rows, cols, density=nnz/(rows*cols))
if debug:
print("\tTime to generate sparse matrix: %s" % t.elapsed)
# Generate indexer.
with Timer() as t:
indexer = {}
idx = np.random.choice(M.nnz, n_indexers, replace=True)
indexer['row'] = M.row[idx]
indexer['col'] = M.col[idx]
indexer['data'] = np.random.rand(idx.size).astype(np.float64)
if debug:
print("\tTime to generate indexer: %s" % t.elapsed)
# Convert sparse matrix.
with Timer() as t:
if sparse_format == 'CSR':
M = sp.sparse.csr_matrix(M)
elif sparse_format == 'CSC':
M = sp.sparse.csc_matrix(M)
else:
raise Exception("sparse_format must be either CSR or CSC.")
if debug:
print("\tTime to convert sparse matrix: %s" % t.elapsed)
# Sort.
with Timer() as t:
if sort:
if sparse_format == 'CSR':
# Sort indices according to row first
sort_idx = np.lexsort((indexer['col'], indexer['row']))
elif sparse_format == 'CSC':
# Sort indices according to col first
sort_idx = np.lexsort((indexer['row'], indexer['col']))
else:
sort_idx = np.arange(indexer['row'].size)
unsort_idx = np.argsort(sort_idx)
if debug:
print("\tTime to sort indexer: %s" % t.elapsed)
sort_time = t.elapsed
# Time the csindexer.
with Timer() as t:
if search_type == 'scipy':
## Run the Scipy function.
with Timer() as t:
if operation == 'get':
data_py = np.squeeze(np.array(M[indexer['row'][sort_idx],
indexer['col'][sort_idx]]))
data_py = data_py[unsort_idx]
elif operation == 'add':
M_sp = M.copy()
idx_coo = sp.sparse.coo_matrix(
(indexer['data'][sort_idx],
(indexer['row'][sort_idx], indexer['col'][sort_idx])),
shape=(rows, cols))
M_sp += idx_coo
else:
raise Exception("Operation must be either get or add.")
else:
## Run the Cython function.
if operation == 'get':
### Don't need to copy M as it doesn't get modified but do have
### to copy indexer['data'] as it does.
data_cs = indexer['data'].copy()
M_cs = M
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]), data_cs,
operation, search_type, n_threads, debug)
### Unsort to get final result.
data_cs = data_cs[unsort_idx]
elif operation == 'add':
### Copy M, don't copy indexer['data'].
data_cs = indexer['data']
M_cs = M.copy()
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]),
np.array(data_cs[sort_idx]), operation,
search_type,
n_threads, debug)
else:
raise Exception("Operation must be either get or add.")
if debug:
print("\tTime for indexing: %s" % t.elapsed)
computation_time = t.elapsed
return computation_time, sort_time | identifier_body |
main.py | """An endpoint to run the speed benchmarks from."""
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib.pyplot as plt
import os
import argparse
import itertools
from contexttimer import Timer
from csindexer import indexer as csindexer
# Use absolute paths to avoid any issues.
project_dir = os.path.dirname(os.path.realpath(__file__))
# Create argument parser.
parser = argparse.ArgumentParser(description="Endpoint for running tests on"
" the compressed sparse indexer.")
parser.add_argument('dependent',
type=str,
default='rows',
help="The varaible to use on the x-axis when plotting"
" against time.")
parser.add_argument('--sort',
type=int,
nargs='+',
default=[0],
help="Assume the indexer is sorted (1) or not (0).")
parser.add_argument('--n-threads',
type=int,
nargs='+',
default=[1],
help="Total threads to use. Set as -1 to use maximum.")
parser.add_argument('--sparse-format',
type=str,
nargs='+',
default=['CSR'],
help="Whether to use CSR or CSC storage format.")
parser.add_argument('--n',
type=int,
nargs='+',
default=[],
help="Total rows and columns of the sparse matrix, forcing"
" it to be square. This can be useful if we want to"
" change both rows and columns on the x-axis. Leave"
" as [] to ignore.")
parser.add_argument('--rows',
type=int,
nargs='+',
default=[100],
help="Total rows of the sparse matrix.")
parser.add_argument('--cols',
type=int,
nargs='+',
default=[100],
help="Total columns of the sparse matrix.")
parser.add_argument('--nnz',
type=int,
nargs='+',
default=[100],
help="Total non-zero values in sparse matrix.")
parser.add_argument('--n-indexers',
type=int,
nargs='+',
default=[100],
help="Total number of points in the indexer.")
parser.add_argument('--search-type',
type=str,
nargs='+',
default=['binary'],
help="Whether to use binary, interpolation or joint"
" search, or the scipy indexer.")
parser.add_argument('--operation',
type=str,
nargs='+',
default=['get'],
help="Whether to use a get or add operation.")
parser.add_argument('--save',
action='store_true',
help="Whether to save the plot to ./figures.")
parser.add_argument('--figure-name',
type=str,
default='my_figure.png',
help="What to call the plot.")
parser.add_argument('--debug',
action='store_true',
help="Print the configuration when creating model.")
parser.add_argument('--random-seed',
type=int,
default=np.random.randint(0, 2**32 - 1),
help="Value of random seed")
FLAGS, unparsed = parser.parse_known_args()
np.random.seed(FLAGS.random_seed)
config = FLAGS.__dict__.copy()
def | (sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation, debug):
"""A function for timing our cxindexer and scipy indexer. It first creates
sparse matrices, sorts if necessary, runs indexers on both and returns
the times."""
if debug:
print("Benchmarking:\n\tSORT = %s\n\tN_THREADS = %s\n\tSPARSE_FORMAT ="
" %s\n\tROWS = %s\n\tCOLS = %s\n\tNNZ = %s\n\tN_INDEXERS ="
" %s\n\t" "SEARCH_TYPE = %s\n\tOPERATION = %s"
% (sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation))
# Generate matrix.
with Timer() as t:
M = sp.sparse.rand(rows, cols, density=nnz/(rows*cols))
if debug:
print("\tTime to generate sparse matrix: %s" % t.elapsed)
# Generate indexer.
with Timer() as t:
indexer = {}
idx = np.random.choice(M.nnz, n_indexers, replace=True)
indexer['row'] = M.row[idx]
indexer['col'] = M.col[idx]
indexer['data'] = np.random.rand(idx.size).astype(np.float64)
if debug:
print("\tTime to generate indexer: %s" % t.elapsed)
# Convert sparse matrix.
with Timer() as t:
if sparse_format == 'CSR':
M = sp.sparse.csr_matrix(M)
elif sparse_format == 'CSC':
M = sp.sparse.csc_matrix(M)
else:
raise Exception("sparse_format must be either CSR or CSC.")
if debug:
print("\tTime to convert sparse matrix: %s" % t.elapsed)
# Sort.
with Timer() as t:
if sort:
if sparse_format == 'CSR':
# Sort indices according to row first
sort_idx = np.lexsort((indexer['col'], indexer['row']))
elif sparse_format == 'CSC':
# Sort indices according to col first
sort_idx = np.lexsort((indexer['row'], indexer['col']))
else:
sort_idx = np.arange(indexer['row'].size)
unsort_idx = np.argsort(sort_idx)
if debug:
print("\tTime to sort indexer: %s" % t.elapsed)
sort_time = t.elapsed
# Time the csindexer.
with Timer() as t:
if search_type == 'scipy':
## Run the Scipy function.
with Timer() as t:
if operation == 'get':
data_py = np.squeeze(np.array(M[indexer['row'][sort_idx],
indexer['col'][sort_idx]]))
data_py = data_py[unsort_idx]
elif operation == 'add':
M_sp = M.copy()
idx_coo = sp.sparse.coo_matrix(
(indexer['data'][sort_idx],
(indexer['row'][sort_idx], indexer['col'][sort_idx])),
shape=(rows, cols))
M_sp += idx_coo
else:
raise Exception("Operation must be either get or add.")
else:
## Run the Cython function.
if operation == 'get':
### Don't need to copy M as it doesn't get modified but do have
### to copy indexer['data'] as it does.
data_cs = indexer['data'].copy()
M_cs = M
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]), data_cs,
operation, search_type, n_threads, debug)
### Unsort to get final result.
data_cs = data_cs[unsort_idx]
elif operation == 'add':
### Copy M, don't copy indexer['data'].
data_cs = indexer['data']
M_cs = M.copy()
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]),
np.array(data_cs[sort_idx]), operation,
search_type,
n_threads, debug)
else:
raise Exception("Operation must be either get or add.")
if debug:
print("\tTime for indexing: %s" % t.elapsed)
computation_time = t.elapsed
return computation_time, sort_time
if __name__ == "__main__":
# Dependent variable gets plotted on x-axis, all others are separate lines
# on the plot.
# Get the list of separate models to be plotted.
if config['n'] != []:
# Override the rows and cols using n (so the sparse matrix is square).
variables = ['sort', 'n_threads', 'sparse_format', 'n',
'nnz', 'n_indexers', 'search_type', 'operation']
else:
variables = ['sort', 'n_threads', 'sparse_format', 'rows', 'cols',
'nnz', 'n_indexers', 'search_type', 'operation']
dependent = config['dependent']
variables.remove(dependent)
models = list(itertools.product(*[config[i] for i in variables]))
# Convert models into dictionaries.
models = [dict(zip(variables, i)) for i in models]
# Now loop over the dependent variable and get timings for each model.
times = np.empty([len(config[dependent]), len(models)])
for i, x in enumerate(config[dependent]):
for j, model in enumerate(models):
m = model.copy()
m[dependent] = x
if 'n' in m:
m['rows'] = m['n']
m['cols'] = m['n']
times[i, j], _ = index_time(m['sort'], m['n_threads'],
m['sparse_format'], m['rows'],
m['cols'], m['nnz'], m['n_indexers'],
m['search_type'], m['operation'],
config['debug'])
# Finally plot each model.
## Get the maximum time seen.
max_time = times.max()
plt.figure(figsize=(20,20))
plt.ylim(0, max_time)
## Plot each model.
for j, model in enumerate(models):
plt.plot(config[dependent], times[:, j])
plt.xlabel(dependent)
## For the legend only use variables that have changed i.e. more than 1
## input.
used_variables = [i for i in variables if len(config[i]) != 1]
unused_variables = [i for i in variables if len(config[i]) == 1]
models_legend = [dict(zip(used_variables,
[model[i] for i in used_variables]))
for model in models]
plt.legend(models_legend)
models_title = dict(zip(unused_variables,
[config[i] for i in unused_variables]))
title = "Sparse indexing %s vs time (%s)" % (dependent, models_title)
plt.title(title)
if config['save']:
fname = project_dir + '/figures/' + config['figure_name']
plt.savefig(fname)
plt.show()
| index_time | identifier_name |
main.py | """An endpoint to run the speed benchmarks from."""
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib.pyplot as plt
import os
import argparse
import itertools
from contexttimer import Timer
from csindexer import indexer as csindexer
# Use absolute paths to avoid any issues.
project_dir = os.path.dirname(os.path.realpath(__file__))
# Create argument parser.
parser = argparse.ArgumentParser(description="Endpoint for running tests on"
" the compressed sparse indexer.")
parser.add_argument('dependent',
type=str,
default='rows',
help="The varaible to use on the x-axis when plotting"
" against time.")
parser.add_argument('--sort',
type=int,
nargs='+',
default=[0],
help="Assume the indexer is sorted (1) or not (0).")
parser.add_argument('--n-threads',
type=int,
nargs='+',
default=[1],
help="Total threads to use. Set as -1 to use maximum.")
parser.add_argument('--sparse-format',
type=str,
nargs='+',
default=['CSR'],
help="Whether to use CSR or CSC storage format.")
parser.add_argument('--n',
type=int,
nargs='+',
default=[],
help="Total rows and columns of the sparse matrix, forcing"
" it to be square. This can be useful if we want to"
" change both rows and columns on the x-axis. Leave"
" as [] to ignore.")
parser.add_argument('--rows',
type=int,
nargs='+',
default=[100],
help="Total rows of the sparse matrix.")
parser.add_argument('--cols',
type=int,
nargs='+',
default=[100],
help="Total columns of the sparse matrix.")
parser.add_argument('--nnz',
type=int,
nargs='+',
default=[100],
help="Total non-zero values in sparse matrix.")
parser.add_argument('--n-indexers',
type=int,
nargs='+',
default=[100],
help="Total number of points in the indexer.")
parser.add_argument('--search-type',
type=str,
nargs='+',
default=['binary'],
help="Whether to use binary, interpolation or joint"
" search, or the scipy indexer.")
parser.add_argument('--operation',
type=str,
nargs='+',
default=['get'],
help="Whether to use a get or add operation.")
parser.add_argument('--save',
action='store_true',
help="Whether to save the plot to ./figures.")
parser.add_argument('--figure-name',
type=str,
default='my_figure.png',
help="What to call the plot.")
parser.add_argument('--debug',
action='store_true',
help="Print the configuration when creating model.")
parser.add_argument('--random-seed',
type=int,
default=np.random.randint(0, 2**32 - 1),
help="Value of random seed")
FLAGS, unparsed = parser.parse_known_args()
np.random.seed(FLAGS.random_seed)
config = FLAGS.__dict__.copy()
def index_time(sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation, debug):
"""A function for timing our cxindexer and scipy indexer. It first creates
sparse matrices, sorts if necessary, runs indexers on both and returns
the times."""
if debug:
print("Benchmarking:\n\tSORT = %s\n\tN_THREADS = %s\n\tSPARSE_FORMAT ="
" %s\n\tROWS = %s\n\tCOLS = %s\n\tNNZ = %s\n\tN_INDEXERS ="
" %s\n\t" "SEARCH_TYPE = %s\n\tOPERATION = %s"
% (sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation))
# Generate matrix.
with Timer() as t:
M = sp.sparse.rand(rows, cols, density=nnz/(rows*cols))
if debug:
print("\tTime to generate sparse matrix: %s" % t.elapsed)
# Generate indexer.
with Timer() as t:
indexer = {}
idx = np.random.choice(M.nnz, n_indexers, replace=True)
indexer['row'] = M.row[idx]
indexer['col'] = M.col[idx]
indexer['data'] = np.random.rand(idx.size).astype(np.float64)
if debug:
print("\tTime to generate indexer: %s" % t.elapsed)
# Convert sparse matrix.
with Timer() as t:
if sparse_format == 'CSR':
|
elif sparse_format == 'CSC':
M = sp.sparse.csc_matrix(M)
else:
raise Exception("sparse_format must be either CSR or CSC.")
if debug:
print("\tTime to convert sparse matrix: %s" % t.elapsed)
# Sort.
with Timer() as t:
if sort:
if sparse_format == 'CSR':
# Sort indices according to row first
sort_idx = np.lexsort((indexer['col'], indexer['row']))
elif sparse_format == 'CSC':
# Sort indices according to col first
sort_idx = np.lexsort((indexer['row'], indexer['col']))
else:
sort_idx = np.arange(indexer['row'].size)
unsort_idx = np.argsort(sort_idx)
if debug:
print("\tTime to sort indexer: %s" % t.elapsed)
sort_time = t.elapsed
# Time the csindexer.
with Timer() as t:
if search_type == 'scipy':
## Run the Scipy function.
with Timer() as t:
if operation == 'get':
data_py = np.squeeze(np.array(M[indexer['row'][sort_idx],
indexer['col'][sort_idx]]))
data_py = data_py[unsort_idx]
elif operation == 'add':
M_sp = M.copy()
idx_coo = sp.sparse.coo_matrix(
(indexer['data'][sort_idx],
(indexer['row'][sort_idx], indexer['col'][sort_idx])),
shape=(rows, cols))
M_sp += idx_coo
else:
raise Exception("Operation must be either get or add.")
else:
## Run the Cython function.
if operation == 'get':
### Don't need to copy M as it doesn't get modified but do have
### to copy indexer['data'] as it does.
data_cs = indexer['data'].copy()
M_cs = M
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]), data_cs,
operation, search_type, n_threads, debug)
### Unsort to get final result.
data_cs = data_cs[unsort_idx]
elif operation == 'add':
### Copy M, don't copy indexer['data'].
data_cs = indexer['data']
M_cs = M.copy()
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]),
np.array(data_cs[sort_idx]), operation,
search_type,
n_threads, debug)
else:
raise Exception("Operation must be either get or add.")
if debug:
print("\tTime for indexing: %s" % t.elapsed)
computation_time = t.elapsed
return computation_time, sort_time
if __name__ == "__main__":
# Dependent variable gets plotted on x-axis, all others are separate lines
# on the plot.
# Get the list of separate models to be plotted.
if config['n'] != []:
# Override the rows and cols using n (so the sparse matrix is square).
variables = ['sort', 'n_threads', 'sparse_format', 'n',
'nnz', 'n_indexers', 'search_type', 'operation']
else:
variables = ['sort', 'n_threads', 'sparse_format', 'rows', 'cols',
'nnz', 'n_indexers', 'search_type', 'operation']
dependent = config['dependent']
variables.remove(dependent)
models = list(itertools.product(*[config[i] for i in variables]))
# Convert models into dictionaries.
models = [dict(zip(variables, i)) for i in models]
# Now loop over the dependent variable and get timings for each model.
times = np.empty([len(config[dependent]), len(models)])
for i, x in enumerate(config[dependent]):
for j, model in enumerate(models):
m = model.copy()
m[dependent] = x
if 'n' in m:
m['rows'] = m['n']
m['cols'] = m['n']
times[i, j], _ = index_time(m['sort'], m['n_threads'],
m['sparse_format'], m['rows'],
m['cols'], m['nnz'], m['n_indexers'],
m['search_type'], m['operation'],
config['debug'])
# Finally plot each model.
## Get the maximum time seen.
max_time = times.max()
plt.figure(figsize=(20,20))
plt.ylim(0, max_time)
## Plot each model.
for j, model in enumerate(models):
plt.plot(config[dependent], times[:, j])
plt.xlabel(dependent)
## For the legend only use variables that have changed i.e. more than 1
## input.
used_variables = [i for i in variables if len(config[i]) != 1]
unused_variables = [i for i in variables if len(config[i]) == 1]
models_legend = [dict(zip(used_variables,
[model[i] for i in used_variables]))
for model in models]
plt.legend(models_legend)
models_title = dict(zip(unused_variables,
[config[i] for i in unused_variables]))
title = "Sparse indexing %s vs time (%s)" % (dependent, models_title)
plt.title(title)
if config['save']:
fname = project_dir + '/figures/' + config['figure_name']
plt.savefig(fname)
plt.show()
| M = sp.sparse.csr_matrix(M) | conditional_block |
main.py | """An endpoint to run the speed benchmarks from."""
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib.pyplot as plt
import os
import argparse
import itertools
from contexttimer import Timer
from csindexer import indexer as csindexer
# Use absolute paths to avoid any issues.
project_dir = os.path.dirname(os.path.realpath(__file__))
# Create argument parser.
parser = argparse.ArgumentParser(description="Endpoint for running tests on"
" the compressed sparse indexer.")
parser.add_argument('dependent',
type=str,
default='rows',
help="The varaible to use on the x-axis when plotting"
" against time.")
parser.add_argument('--sort',
type=int,
nargs='+',
default=[0],
help="Assume the indexer is sorted (1) or not (0).")
parser.add_argument('--n-threads',
type=int,
nargs='+',
default=[1],
help="Total threads to use. Set as -1 to use maximum.")
parser.add_argument('--sparse-format',
type=str,
nargs='+',
default=['CSR'],
help="Whether to use CSR or CSC storage format.")
parser.add_argument('--n',
type=int,
nargs='+',
default=[],
help="Total rows and columns of the sparse matrix, forcing"
" it to be square. This can be useful if we want to"
" change both rows and columns on the x-axis. Leave"
" as [] to ignore.")
parser.add_argument('--rows',
type=int,
nargs='+',
default=[100],
help="Total rows of the sparse matrix.")
parser.add_argument('--cols',
type=int,
nargs='+',
default=[100],
help="Total columns of the sparse matrix.")
parser.add_argument('--nnz',
type=int,
nargs='+',
default=[100],
help="Total non-zero values in sparse matrix.")
parser.add_argument('--n-indexers',
type=int,
nargs='+',
default=[100],
help="Total number of points in the indexer.")
parser.add_argument('--search-type',
type=str,
nargs='+',
default=['binary'],
help="Whether to use binary, interpolation or joint"
" search, or the scipy indexer.")
parser.add_argument('--operation',
type=str,
nargs='+',
default=['get'],
help="Whether to use a get or add operation.")
parser.add_argument('--save',
action='store_true',
help="Whether to save the plot to ./figures.")
parser.add_argument('--figure-name',
type=str,
default='my_figure.png',
help="What to call the plot.")
parser.add_argument('--debug',
action='store_true',
help="Print the configuration when creating model.")
parser.add_argument('--random-seed',
type=int,
default=np.random.randint(0, 2**32 - 1),
help="Value of random seed")
FLAGS, unparsed = parser.parse_known_args()
np.random.seed(FLAGS.random_seed)
config = FLAGS.__dict__.copy()
def index_time(sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation, debug):
"""A function for timing our cxindexer and scipy indexer. It first creates
sparse matrices, sorts if necessary, runs indexers on both and returns
the times."""
if debug:
print("Benchmarking:\n\tSORT = %s\n\tN_THREADS = %s\n\tSPARSE_FORMAT ="
" %s\n\tROWS = %s\n\tCOLS = %s\n\tNNZ = %s\n\tN_INDEXERS ="
" %s\n\t" "SEARCH_TYPE = %s\n\tOPERATION = %s"
% (sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,
search_type, operation))
# Generate matrix.
with Timer() as t:
M = sp.sparse.rand(rows, cols, density=nnz/(rows*cols))
if debug:
print("\tTime to generate sparse matrix: %s" % t.elapsed)
# Generate indexer.
with Timer() as t:
indexer = {}
idx = np.random.choice(M.nnz, n_indexers, replace=True)
indexer['row'] = M.row[idx]
indexer['col'] = M.col[idx]
indexer['data'] = np.random.rand(idx.size).astype(np.float64)
if debug:
print("\tTime to generate indexer: %s" % t.elapsed)
# Convert sparse matrix.
with Timer() as t:
if sparse_format == 'CSR': | elif sparse_format == 'CSC':
M = sp.sparse.csc_matrix(M)
else:
raise Exception("sparse_format must be either CSR or CSC.")
if debug:
print("\tTime to convert sparse matrix: %s" % t.elapsed)
# Sort.
with Timer() as t:
if sort:
if sparse_format == 'CSR':
# Sort indices according to row first
sort_idx = np.lexsort((indexer['col'], indexer['row']))
elif sparse_format == 'CSC':
# Sort indices according to col first
sort_idx = np.lexsort((indexer['row'], indexer['col']))
else:
sort_idx = np.arange(indexer['row'].size)
unsort_idx = np.argsort(sort_idx)
if debug:
print("\tTime to sort indexer: %s" % t.elapsed)
sort_time = t.elapsed
# Time the csindexer.
with Timer() as t:
if search_type == 'scipy':
## Run the Scipy function.
with Timer() as t:
if operation == 'get':
data_py = np.squeeze(np.array(M[indexer['row'][sort_idx],
indexer['col'][sort_idx]]))
data_py = data_py[unsort_idx]
elif operation == 'add':
M_sp = M.copy()
idx_coo = sp.sparse.coo_matrix(
(indexer['data'][sort_idx],
(indexer['row'][sort_idx], indexer['col'][sort_idx])),
shape=(rows, cols))
M_sp += idx_coo
else:
raise Exception("Operation must be either get or add.")
else:
## Run the Cython function.
if operation == 'get':
### Don't need to copy M as it doesn't get modified but do have
### to copy indexer['data'] as it does.
data_cs = indexer['data'].copy()
M_cs = M
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]), data_cs,
operation, search_type, n_threads, debug)
### Unsort to get final result.
data_cs = data_cs[unsort_idx]
elif operation == 'add':
### Copy M, don't copy indexer['data'].
data_cs = indexer['data']
M_cs = M.copy()
csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),
np.array(indexer['col'][sort_idx]),
np.array(data_cs[sort_idx]), operation,
search_type,
n_threads, debug)
else:
raise Exception("Operation must be either get or add.")
if debug:
print("\tTime for indexing: %s" % t.elapsed)
computation_time = t.elapsed
return computation_time, sort_time
if __name__ == "__main__":
# Dependent variable gets plotted on x-axis, all others are separate lines
# on the plot.
# Get the list of separate models to be plotted.
if config['n'] != []:
# Override the rows and cols using n (so the sparse matrix is square).
variables = ['sort', 'n_threads', 'sparse_format', 'n',
'nnz', 'n_indexers', 'search_type', 'operation']
else:
variables = ['sort', 'n_threads', 'sparse_format', 'rows', 'cols',
'nnz', 'n_indexers', 'search_type', 'operation']
dependent = config['dependent']
variables.remove(dependent)
models = list(itertools.product(*[config[i] for i in variables]))
# Convert models into dictionaries.
models = [dict(zip(variables, i)) for i in models]
# Now loop over the dependent variable and get timings for each model.
times = np.empty([len(config[dependent]), len(models)])
for i, x in enumerate(config[dependent]):
for j, model in enumerate(models):
m = model.copy()
m[dependent] = x
if 'n' in m:
m['rows'] = m['n']
m['cols'] = m['n']
times[i, j], _ = index_time(m['sort'], m['n_threads'],
m['sparse_format'], m['rows'],
m['cols'], m['nnz'], m['n_indexers'],
m['search_type'], m['operation'],
config['debug'])
# Finally plot each model.
## Get the maximum time seen.
max_time = times.max()
plt.figure(figsize=(20,20))
plt.ylim(0, max_time)
## Plot each model.
for j, model in enumerate(models):
plt.plot(config[dependent], times[:, j])
plt.xlabel(dependent)
## For the legend only use variables that have changed i.e. more than 1
## input.
used_variables = [i for i in variables if len(config[i]) != 1]
unused_variables = [i for i in variables if len(config[i]) == 1]
models_legend = [dict(zip(used_variables,
[model[i] for i in used_variables]))
for model in models]
plt.legend(models_legend)
models_title = dict(zip(unused_variables,
[config[i] for i in unused_variables]))
title = "Sparse indexing %s vs time (%s)" % (dependent, models_title)
plt.title(title)
if config['save']:
fname = project_dir + '/figures/' + config['figure_name']
plt.savefig(fname)
plt.show() | M = sp.sparse.csr_matrix(M) | random_line_split |
feature-select.py | import os
import sys
import pandas as pd
import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.svm import SVC
from sklearn import preprocessing
# from sklearn.externals import joblib
import seaborn
import matplotlib.pyplot as plt
import tensorflow as tf
def stop():
while 1:
pass
def where_is_nan(data): #查数据中缺省的位置 pd的格式
temp = 0
print(sys._getframe().f_back.f_lineno)
for columname in data:
if(np.isnan(data.loc[:,columname]).any()):
temp = 1
print(columname)
print("\tsum = ",np.isnan(data.loc[:,columname]).sum(),"\n\tpos = ",np.where(np.isnan(data.loc[:,columname]) == True)[0])
if temp == 0 :
print("have no nan\n")
def drop_index_rule(data_src,rule):
drop_pos = data_src.loc[rule,:].index.values
data_src.drop(drop_pos,axis=0,inplace=True) #去除年月日
def data_classify(data_src, label_name, data_train_proportion): #把乱的数据根据标签分好类 变成有序数据 再分成训练集和测试集两部分 分为两个list返回 data_src存放从文件读来得原始数据 label_name存放的是用于分类的列的名字 data_train_proportion是训练集占总的比列
labels_class_num = data_src.loc[:,label_name].nunique()
labels_classes = data_src.loc[:,label_name].unique()
order=np.argsort(labels_classes)
labels_classes = labels_classes[order]
temp_train = []
temp_test = []
for lab in labels_classes:
classfy_pos = data_src.loc[data_src[label_name] == lab,:].index.values
data_classed = data_src.loc[classfy_pos,:]
data_classed = data_classed.reset_index() #重设索引
data_classed.drop(['index'],axis=1,inplace=True) #去除多余索引
row_num,col_num = data_classed.shape
train_row_num = row_num * data_train_proportion
train_data = data_classed.loc[0:(train_row_num)]
test_data = data_classed.loc[train_row_num:]
train_data = train_data.reset_index() #重设索引
train_data.drop(['index'],axis=1,inplace=True) #去除多余索引
test_data = test_data.reset_index() #重设索引
test_data.drop(['index'],axis=1,inplace=True) #去除多余索引
temp_train.append(train_data)
temp_test.append(test_data)
return temp_train,temp_test
def data_expand(data_list,row_expand_aim_num): #数据扩增 解决样本不平衡 待扩增的数据以list的新式放进去 row_expand_aim_num是想要扩增为多少行
# data_list[0] = pd.concat( [data_list[0], data_list[0]], axis = 0)
for count in range(len(data_list)):
row_num = data_list[count].shape[0]
if row_num < row_expand_aim_num:
err = row_expand_aim_num - row_num
temp = data_list[count].sample(n = err, replace=True)
data_list[count] = pd.concat( [data_list[count], temp], axis = 0)
data_list[count] = data_list[count].reset_index() #重设索引
data_list[count].drop(['index' | cat( [temp, data_list[count]], axis = 0)
temp = temp.reset_index() #重设索引
temp.drop(['index'],axis=1,inplace=True) #去除多余索引
return temp
def get_next_batch(all_data,batch_size,step):
row_num = all_data.shape[0]
batch_num = row_num/batch_size
batch_count = step%batch_num
begin = int(batch_count * batch_size)
end = int((batch_count + 1) * batch_size)
return all_data[begin:end]
base_dataset = pd.read_csv("./src-data/happiness_train_complete.csv",encoding='ISO-8859-1')
drop_list = [] #去除字符串形式的数据
for col in base_dataset:
if base_dataset.loc[:,col].dtype == 'object':
drop_list.append(col)
base_dataset.drop(drop_list,axis=1,inplace=True)
drop_index_rule(base_dataset, np.isnan(base_dataset.family_income)) #去除缺省行
base_dataset.drop(base_dataset.loc[:,np.isnan(base_dataset).any()].columns,axis = 1,inplace = True) #删除所有有缺省的列
base_dataset = base_dataset.reset_index() #重设索引
base_dataset.drop(['index'],axis=1,inplace=True) #去除多余索引
base_labelset = base_dataset['happiness']
base_dataset.drop(['happiness'],axis=1,inplace=True)
base_dataset.drop(['id'],axis=1,inplace=True)
base_dataset = base_dataset.loc[:,base_dataset.var() > 0.9] #提取方差大于1的特征
base_dataset = pd.concat( [base_labelset, base_dataset], axis = 1) #把标签和数据拼上
drop_index_rule(base_dataset,base_dataset.happiness<1) #幸福感有问题的行删掉
train_data, test_data = data_classify(base_dataset, 'happiness', 0.8)
data_expand(train_data, 2000) #由于样本严重不平衡 这里先进行样本扩增 每一个happiness类都扩到2500个样本 用复制的方法扩增
data_expand(test_data, 1000)
train_data = pack_data_list(train_data) #把数据分成了几类的list给打包到一起
test_data = pack_data_list(test_data)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
labels_train = train_data['happiness']
labels_test = test_data['happiness']
labels_test_src = labels_test
labels_train_src = labels_train
train_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
test_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
train_data.head().to_csv("./temp-data/colname.csv",index = False)
np_src_happiness = labels_train.values #获取原始的幸福感数据 numpy格式 #训练labels
np_happiness_train_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_train_labels[i,np_src_happiness[i]-1] = 1
np_src_happiness = labels_test.values #获取原始的幸福感数据 numpy格式 #测试labels
np_happiness_test_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_test_labels[i,np_src_happiness[i]-1] = 1
train_data = preprocessing.scale(train_data) #数据标准化 使数据的每列基本满足正态分布
test_data = preprocessing.scale(test_data)
train_labels = np_happiness_train_labels
test_labels = np_happiness_test_labels
# print(labels_test_src,test_data)
print(train_data.shape,test_data.shape)
print(train_labels.shape,test_labels.shape)
# train_data = preprocessing.scale(base_dataset)
# stop()
ax = [] # 定义一个 x 轴的空列表用来接收动态的数据
ay = [] # 定义一个 y 轴的空列表用来接收动态的数据
ay2 = []
ay3 = []
ay4 = []
TRAIN_DATA_SIZZE = train_data.shape[0]
BATCH_SIZE = 5000
LEARNING_RATE_BASE = 0.9
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0009
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="modules/"
MODEL_NAME="mnist_model"
TRAINING_STEPS = 9000000000
input_num = train_data.shape[1]
layer_node_num = [400, 200, 100]
output_num = 5
def get_weight_variable(name, shape, regularizer):
weights = tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
if regularizer != None: tf.add_to_collection('losses', regularizer(weights))
return weights
#定义两层简单的网络
x = tf.placeholder(tf.float32, [None, input_num], name='x-input')
y_ = tf.placeholder(tf.float32, [None, output_num], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
weights1 = get_weight_variable("weights1",[input_num, layer_node_num[0]], regularizer)
biases1 = tf.get_variable("biases1", [layer_node_num[0]], initializer=tf.constant_initializer(0.0))
layer1 = tf.nn.relu(tf.matmul(x, weights1) + biases1)
weights2 = get_weight_variable("weights2", [layer_node_num[0], layer_node_num[1]],regularizer)
biases2 = tf.get_variable("biases2", [layer_node_num[1]], initializer=tf.constant_initializer(0.0))
layer2 = tf.nn.relu(tf.matmul(layer1, weights2) + biases2)
weights3 = get_weight_variable("weights3", [layer_node_num[1], layer_node_num[2]],regularizer)
biases3 = tf.get_variable("biases3", [layer_node_num[2]], initializer=tf.constant_initializer(0.0))
layer3 = tf.nn.tanh(tf.matmul(layer2, weights3) + biases3)
weights_out = get_weight_variable("weights_out",[layer_node_num[2], output_num], regularizer)
biases_out = tf.get_variable("biases_out", [output_num], initializer=tf.constant_initializer(0.0))
layer_out = tf.matmul(layer3, weights_out) + biases_out
y = layer_out
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
TRAIN_DATA_SIZZE / BATCH_SIZE, LEARNING_RATE_DECAY,
staircase=True)
# Optimizer
# GradientDescentOptimizer
# AdagradOptimizer
# AdagradDAOptimizer
# MomentumOptimizer
# AdamOptimizer
# FtrlOptimizer
# RMSPropOptimizer
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variables_averages_op]):
train_op = tf.no_op(name='train')
saver = tf.train.Saver()
with tf.Session() as sess:
writer = tf.summary.FileWriter("logs/", sess.graph)
tf.global_variables_initializer().run()
plt.ion() # 开启一个画图的窗口
for step_count in range(TRAINING_STEPS):
xs = get_next_batch(train_data,BATCH_SIZE,step_count)
ys = get_next_batch(train_labels,BATCH_SIZE,step_count)
xt = test_data
yt = test_labels
_, loss_value, step ,np_output_labels= sess.run([train_op, loss, global_step, y], feed_dict={x: xs, y_: ys})
np_test_output_labels,_ = sess.run([y,loss], feed_dict={x: xt, y_: yt})
output_labels_size,_ = np_output_labels.shape #计算训练集的错误率
result = np.zeros((output_labels_size))
result_train = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_output_labels[i])
result_train[i] = maxarg
np_output_labels[i] = 0
np_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_output_labels[i] == ys[i]).any())
#pass
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
train_labels_test = np.zeros((ys.shape[0]))
for i in range(ys.shape[0]):
train_labels_test[i] = np.argmax(ys[i]) + 1
score_train = ((train_labels_test - result_train)**2).sum()/result_train.shape[0]
ay4.append(score_train/20)
print("loss :",loss_value)
print("训练集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0],"score :",score_train)
ay2.append(right_sum/result.shape[0]/2) # 添加 i 的平方到 y 轴的数据中
output_labels_size,_ = np_test_output_labels.shape #计算测试集的错误率
result = np.zeros((output_labels_size))
# print(np.argmax(np_test_output_labels))
result_test = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_test_output_labels[i])
result_test[i] = maxarg
np_test_output_labels[i] = 0
np_test_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_test_output_labels[i] == yt[i]).any())
pass
score = ((labels_test_src.values - result_test)**2).sum()/result_test.shape[0]
ay3.append(score/20)
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
print("测试集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0])
ax.append(step_count) # 添加 i 到 x 轴的数据中
ay.append(right_sum/result.shape[0]) # 添加 i 的平方到 y 轴的数据中
plt.clf() # 清除之前画的图
plt.plot(ax,ay4,color = 'black') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay3,color = 'green') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay,color = 'red') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay2) # 画出当前 ax 列表和 ay 列表中的值的图形
plt.pause(0.01) # 暂停一秒
saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
plt.ioff() # 关闭画图的窗口
| ],axis=1,inplace=True) #去除多余索引
def pack_data_list(data_list):
temp = data_list[0]
row_num = len(data_list)
for count in range(1,row_num):
temp = pd.con | conditional_block |
feature-select.py | import os
import sys
import pandas as pd
import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.svm import SVC
from sklearn import preprocessing
# from sklearn.externals import joblib
import seaborn
import matplotlib.pyplot as plt
import tensorflow as tf
def stop():
while 1:
pass
def where_is_nan(data): #查数据中缺省的位置 pd的格式
temp = 0
print(sys._getframe().f_back.f_lineno)
for columname in data:
if(np.isnan(data.loc[:,columname]).any()):
temp = 1
print(columname)
print("\tsum = ",np.isnan(data.loc[:,columname]).sum(),"\n\tpos = ",np.where(np.isnan(data.loc[:,columname]) == True)[0])
if temp == 0 :
print("have no nan\n")
def drop_index_rule(data_src,rule):
drop_pos = data_src.loc[rule,:].index.values
data_src.drop(drop_pos,axis=0,inplace=True) #去除年月日
def data_classify(data_src, label_name, data_train_proportion): #把乱的数据根据标签分好类 变成有序数据 再分成训练集和测试集两部分 分为两个list返回 data_src存放从文件读来得原始数据 label_name存放的是用于分类的列的名字 data_train_proportion是训练集占总的比列
labels_class_num = data_src.loc[:,label_name].nunique()
labels_classes = data_src.loc[:,label_name].unique()
order=np.argsort(labels_classes)
labels_classes = labels_classes[order]
temp_train = []
temp_test = []
for lab in labels_classes:
classfy_pos = data_src.loc[data_src[label_name] == lab,:].index.values
data_classed = data_src.loc[classfy_pos,:]
data_classed = data_classed.reset_index() #重设索引
data_classed.drop(['index'],axis=1,inplace=True) #去除多余索引
row_num,col_num = data_classed.shape
train_row_num = row_num * data_train_proportion
train_data = data_classed.loc[0:(train_row_num)]
test_data = data_classed.loc[train_row_num:]
train_data = train_data.reset_index() #重设索引
train_data.drop(['index'],axis=1,inplace=True) #去除多余索引
test_data = test_data.reset_index() #重设索引
test_data.drop(['index'],axis=1,inplace=True) #去除多余索引
temp_train.append(train_data)
temp_test.append(test_data)
return temp_train,temp_test
def data_expand(data_list,row_expand_aim_num): #数据扩增 解决样本不平衡 待扩增的数据以list的新式放进去 row_expand_aim_num是想要扩增为多少行
# data_list[0] = pd.concat( [data_list[0], data_list[0]], axis = 0)
for count in range(len(data_list)):
row_num = data_list[count].shape[0]
if row_num < row_expand_aim_num:
err = row_expand_aim_num - row_num
temp = data_list[count].sample(n = err, replace=True)
data_list[count] = pd.concat( [data_list[count], temp], axis = 0)
data_list[count] = data_list[count].reset_index() #重设索引
data_list[count].drop(['index'],axis=1,inplace=True) #去除多余索引
def pack_data_list(data_list):
temp = data_list[0]
row_num = len(data_list)
for count in range(1,row_num):
temp = pd.concat( [temp, data_list[count]], axis = 0)
temp = temp.reset_index() #重设索引
temp.drop(['index'],axis=1,inplace=True) #去除多余索引
return temp
def get_next_batch(all_data,batch_size,step):
row_num = all_data.shape[0]
batch_num = row_num/batch_size
batch_count = step%batch_num
begin = int(batch_count * batch_size)
end = int((batch_count + 1) * batch_size)
return all_data[begin:end]
base_dataset = pd.read_csv("./src-data/happiness_train_complete.csv",encoding='ISO-8859-1')
drop_list = [] #去除字符串形式的数据
for col in base_dataset:
if base_dataset.loc[:,col].dtype == 'object':
drop_list.append(col)
base_dataset.drop(drop_list,axis=1,inplace=True)
drop_index_rule(base_dataset, np.isnan(base_dataset.family_income)) #去除缺省行
base_dataset.drop(base_dataset.loc[:,np.isnan(base_dataset).any()].columns,axis = 1,inplace = True) #删除所有有缺省的列
base_dataset = base_dataset.reset_index() #重设索引
base_dataset.drop(['index'],axis=1,inplace=True) #去除多余索引
base_labelset = base_dataset['happiness']
base_dataset.drop(['happiness'],axis=1,inplace=True)
base_dataset.drop(['id'],axis=1,inplace=True)
base_dataset = base_dataset.loc[:,base_dataset.var() > 0.9] #提取方差大于1的特征
base_dataset = pd.concat( [base_labelset, base_dataset], axis = 1) #把标签和数据拼上
drop_index_rule(base_dataset,base_dataset.happiness<1) #幸福感有问题的行删掉
train_data, test_data = data_classify(base_dataset, 'happiness', 0.8)
data_expand(train_data, 2000) #由于样本严重不平衡 这里先进行样本扩增 每一个happiness类都扩到2500个样本 用复制的方法扩增
data_expand(test_data, 1000)
train_data = pack_data_list(train_data) #把数据分成了几类的list给打包到一起
test_data = pack_data_list(test_data)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
labels_train = train_data['happiness']
labels_test = test_data['happiness']
labels_test_src = labels_test
labels_train_src = labels_train
train_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
test_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
train_data.head().to_csv("./temp-data/colname.csv",index = False)
np_src_happiness = labels_train.values #获取原始的幸福感数据 numpy格式 #训练labels
np_happiness_train_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_train_labels[i,np_src_happiness[i]-1] = 1
np_src_happiness = labels_test.values #获取原始的幸福感数据 numpy格式 #测试labels
np_happiness_test_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_test_labels[i,np_src_happiness[i]-1] = 1
train_data = preprocessing.scale(train_data) #数据标准化 使数据的每列基本满足正态分布
test_data = preprocessing.scale(test_data)
train_labels = np_happiness_train_labels
test_labels = np_happiness_test_labels
# print(labels_test_src,test_data)
print(train_data.shape,test_data.shape)
print(train_labels.shape,test_labels.shape)
# train_data = preprocessing.scale(base_dataset)
# stop()
ax = [] # 定义一个 x 轴的空列表用来接收动态的数据
ay = [] # 定义一个 y 轴的空列表用来接收动态的数据
ay2 = []
ay3 = []
ay4 = []
TRAIN_DATA_SIZZE = train_data.shape[0]
BATCH_SIZE = 5000
LEARNING_RATE_BASE = 0.9
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0009
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="modules/"
MODEL_NAME="mnist_model"
TRAINING_STEPS = 9000000000
input_num = train_data.shape[1]
layer_node_num = [400, 200, 100]
output_num = 5
def get_weight_variable(name, shape, regularizer):
weights = tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
if regularizer != None: tf.add_to_collection('losses', regularizer(weights))
return weights
#定义两层简单的网络
x = tf.placeholder(tf.float32, [None, input_num], name='x-input')
y_ = tf.placeholder(tf.float32, [None, output_num], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
weights1 = get_weight_variable("weights1",[input_num, layer_node_num[0]], regularizer)
biases1 = tf.get_variable("biases1", [layer_node_num[0]], initializer=tf.constant_initializer(0.0))
layer1 = tf.nn.relu(tf.matmul(x, weights1) + biases1)
weights2 = get_weight_variable("weights2", [layer_node_num[0], layer_node_num[1]],regularizer)
biases2 = tf.get_variable("biases2", [layer_node_num[1]], initializer=tf.constant_initializer(0.0))
layer2 = tf | um[2]], initializer=tf.constant_initializer(0.0))
layer3 = tf.nn.tanh(tf.matmul(layer2, weights3) + biases3)
weights_out = get_weight_variable("weights_out",[layer_node_num[2], output_num], regularizer)
biases_out = tf.get_variable("biases_out", [output_num], initializer=tf.constant_initializer(0.0))
layer_out = tf.matmul(layer3, weights_out) + biases_out
y = layer_out
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
TRAIN_DATA_SIZZE / BATCH_SIZE, LEARNING_RATE_DECAY,
staircase=True)
# Optimizer
# GradientDescentOptimizer
# AdagradOptimizer
# AdagradDAOptimizer
# MomentumOptimizer
# AdamOptimizer
# FtrlOptimizer
# RMSPropOptimizer
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variables_averages_op]):
train_op = tf.no_op(name='train')
saver = tf.train.Saver()
with tf.Session() as sess:
writer = tf.summary.FileWriter("logs/", sess.graph)
tf.global_variables_initializer().run()
plt.ion() # 开启一个画图的窗口
for step_count in range(TRAINING_STEPS):
xs = get_next_batch(train_data,BATCH_SIZE,step_count)
ys = get_next_batch(train_labels,BATCH_SIZE,step_count)
xt = test_data
yt = test_labels
_, loss_value, step ,np_output_labels= sess.run([train_op, loss, global_step, y], feed_dict={x: xs, y_: ys})
np_test_output_labels,_ = sess.run([y,loss], feed_dict={x: xt, y_: yt})
output_labels_size,_ = np_output_labels.shape #计算训练集的错误率
result = np.zeros((output_labels_size))
result_train = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_output_labels[i])
result_train[i] = maxarg
np_output_labels[i] = 0
np_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_output_labels[i] == ys[i]).any())
#pass
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
train_labels_test = np.zeros((ys.shape[0]))
for i in range(ys.shape[0]):
train_labels_test[i] = np.argmax(ys[i]) + 1
score_train = ((train_labels_test - result_train)**2).sum()/result_train.shape[0]
ay4.append(score_train/20)
print("loss :",loss_value)
print("训练集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0],"score :",score_train)
ay2.append(right_sum/result.shape[0]/2) # 添加 i 的平方到 y 轴的数据中
output_labels_size,_ = np_test_output_labels.shape #计算测试集的错误率
result = np.zeros((output_labels_size))
# print(np.argmax(np_test_output_labels))
result_test = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_test_output_labels[i])
result_test[i] = maxarg
np_test_output_labels[i] = 0
np_test_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_test_output_labels[i] == yt[i]).any())
pass
score = ((labels_test_src.values - result_test)**2).sum()/result_test.shape[0]
ay3.append(score/20)
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
print("测试集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0])
ax.append(step_count) # 添加 i 到 x 轴的数据中
ay.append(right_sum/result.shape[0]) # 添加 i 的平方到 y 轴的数据中
plt.clf() # 清除之前画的图
plt.plot(ax,ay4,color = 'black') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay3,color = 'green') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay,color = 'red') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay2) # 画出当前 ax 列表和 ay 列表中的值的图形
plt.pause(0.01) # 暂停一秒
saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
plt.ioff() # 关闭画图的窗口
| .nn.relu(tf.matmul(layer1, weights2) + biases2)
weights3 = get_weight_variable("weights3", [layer_node_num[1], layer_node_num[2]],regularizer)
biases3 = tf.get_variable("biases3", [layer_node_n | identifier_body |
feature-select.py | import os
import sys
import pandas as pd
import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.svm import SVC
from sklearn import preprocessing
# from sklearn.externals import joblib
import seaborn
import matplotlib.pyplot as plt
import tensorflow as tf
def stop():
while 1:
pass
def where_is_nan(data): #查数据中缺省的位置 pd的格式
temp = 0
print(sys._getframe().f_back.f_lineno)
for columname in data:
if(np.isnan(data.loc[:,columname]).any()):
temp = 1
print(columname)
print("\tsum = ",np.isnan(data.loc[:,columname]).sum(),"\n\tpos = ",np.where(np.isnan(data.loc[:,columname]) == True)[0])
if temp == 0 :
print("have no nan\n")
def drop_index_rule(data_src,rule):
drop_pos = data_src.loc[rule,:].index.values
data_src.drop(drop_pos,axis=0,inplace=True) #去除年月日
def data_classify(data_src, label_name, data_train_proportion): #把乱的数据根据标签分好类 变成有序数据 再分成训练集和测试集两部分 分为两个list返回 data_src存放从文件读来得原始数据 label_name存放的是用于分类的列的名字 data_train_proportion是训练集占总的比列
labels_class_num = data_src.loc[:,label_name].nunique()
labels_classes = data_src.loc[:,label_name].unique()
order=np.argsort(labels_classes)
labels_classes = labels_classes[order]
temp_train = []
temp_test = []
for lab in labels_classes:
classfy_pos = data_src.loc[data_src[label_name] == lab,:].index.values
data_classed = data_src.loc[classfy_pos,:]
data_classed = data_classed.reset_index() #重设索引
data_classed.drop(['index'],axis=1,inplace=True) #去除多余索引
row_num,col_num = data_classed.shape
train_row_num = row_num * data_train_proportion
train_data = data_classed.loc[0:(train_row_num)]
test_data = data_classed.loc[train_row_num:]
train_data = train_data.reset_index() #重设索引
train_data.drop(['index'],axis=1,inplace=True) #去除多余索引
test_data = test_data.reset_index() #重设索引
test_data.drop(['index'],axis=1,inplace=True) #去除多余索引
temp_train.append(train_data)
temp_test.append(test_data)
return temp_train,temp_test
def data_expand(data_list,row_expand_aim_num): #数据扩增 解决样本不平衡 待扩增的数据以list的新式放进去 row_expand_aim_num是想要扩增为多少行
# data_list[0] = pd.concat( [data_list[0], data_list[0]], axis = 0)
for count in range(len(data_list)):
row_num = data_list[count].shape[0]
if row_num < row_expand_aim_num:
err = row_expand_aim_num - row_num
temp = data_list[count].sample(n = err, replace=True)
data_list[count] = pd.concat( [data_list[count], temp], axis = 0)
data_list[count] = data_list[count].reset_index() #重设索引
data_list[count].drop(['index'],axis=1,inplace=True) #去除多余索引
def pack_data_list(data_list):
temp = data_list[0]
row_num = len(data_list)
for count in range(1,row_num):
temp = pd.concat( [temp, data_list[count]], axis = 0)
temp = temp.reset_index() #重设索引
temp.drop(['index'],axis=1,inplace=True) #去除多余索引
return temp
def get_next_batch(all_data,batch_size,step):
row_num = all_data.shape[0]
batch_num = row_num/batch_size
batch_count = step%batch_num
begin = int(batch_count * batch_size)
end = int((batch_count + 1) * batch_size)
return all_data[begin:end]
base_dataset = pd.read_csv("./src-data/happiness_train_complet | g='ISO-8859-1')
drop_list = [] #去除字符串形式的数据
for col in base_dataset:
if base_dataset.loc[:,col].dtype == 'object':
drop_list.append(col)
base_dataset.drop(drop_list,axis=1,inplace=True)
drop_index_rule(base_dataset, np.isnan(base_dataset.family_income)) #去除缺省行
base_dataset.drop(base_dataset.loc[:,np.isnan(base_dataset).any()].columns,axis = 1,inplace = True) #删除所有有缺省的列
base_dataset = base_dataset.reset_index() #重设索引
base_dataset.drop(['index'],axis=1,inplace=True) #去除多余索引
base_labelset = base_dataset['happiness']
base_dataset.drop(['happiness'],axis=1,inplace=True)
base_dataset.drop(['id'],axis=1,inplace=True)
base_dataset = base_dataset.loc[:,base_dataset.var() > 0.9] #提取方差大于1的特征
base_dataset = pd.concat( [base_labelset, base_dataset], axis = 1) #把标签和数据拼上
drop_index_rule(base_dataset,base_dataset.happiness<1) #幸福感有问题的行删掉
train_data, test_data = data_classify(base_dataset, 'happiness', 0.8)
data_expand(train_data, 2000) #由于样本严重不平衡 这里先进行样本扩增 每一个happiness类都扩到2500个样本 用复制的方法扩增
data_expand(test_data, 1000)
train_data = pack_data_list(train_data) #把数据分成了几类的list给打包到一起
test_data = pack_data_list(test_data)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
labels_train = train_data['happiness']
labels_test = test_data['happiness']
labels_test_src = labels_test
labels_train_src = labels_train
train_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
test_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
train_data.head().to_csv("./temp-data/colname.csv",index = False)
np_src_happiness = labels_train.values #获取原始的幸福感数据 numpy格式 #训练labels
np_happiness_train_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_train_labels[i,np_src_happiness[i]-1] = 1
np_src_happiness = labels_test.values #获取原始的幸福感数据 numpy格式 #测试labels
np_happiness_test_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_test_labels[i,np_src_happiness[i]-1] = 1
train_data = preprocessing.scale(train_data) #数据标准化 使数据的每列基本满足正态分布
test_data = preprocessing.scale(test_data)
train_labels = np_happiness_train_labels
test_labels = np_happiness_test_labels
# print(labels_test_src,test_data)
print(train_data.shape,test_data.shape)
print(train_labels.shape,test_labels.shape)
# train_data = preprocessing.scale(base_dataset)
# stop()
ax = [] # 定义一个 x 轴的空列表用来接收动态的数据
ay = [] # 定义一个 y 轴的空列表用来接收动态的数据
ay2 = []
ay3 = []
ay4 = []
TRAIN_DATA_SIZZE = train_data.shape[0]
BATCH_SIZE = 5000
LEARNING_RATE_BASE = 0.9
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0009
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="modules/"
MODEL_NAME="mnist_model"
TRAINING_STEPS = 9000000000
input_num = train_data.shape[1]
layer_node_num = [400, 200, 100]
output_num = 5
def get_weight_variable(name, shape, regularizer):
weights = tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
if regularizer != None: tf.add_to_collection('losses', regularizer(weights))
return weights
#定义两层简单的网络
x = tf.placeholder(tf.float32, [None, input_num], name='x-input')
y_ = tf.placeholder(tf.float32, [None, output_num], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
weights1 = get_weight_variable("weights1",[input_num, layer_node_num[0]], regularizer)
biases1 = tf.get_variable("biases1", [layer_node_num[0]], initializer=tf.constant_initializer(0.0))
layer1 = tf.nn.relu(tf.matmul(x, weights1) + biases1)
weights2 = get_weight_variable("weights2", [layer_node_num[0], layer_node_num[1]],regularizer)
biases2 = tf.get_variable("biases2", [layer_node_num[1]], initializer=tf.constant_initializer(0.0))
layer2 = tf.nn.relu(tf.matmul(layer1, weights2) + biases2)
weights3 = get_weight_variable("weights3", [layer_node_num[1], layer_node_num[2]],regularizer)
biases3 = tf.get_variable("biases3", [layer_node_num[2]], initializer=tf.constant_initializer(0.0))
layer3 = tf.nn.tanh(tf.matmul(layer2, weights3) + biases3)
weights_out = get_weight_variable("weights_out",[layer_node_num[2], output_num], regularizer)
biases_out = tf.get_variable("biases_out", [output_num], initializer=tf.constant_initializer(0.0))
layer_out = tf.matmul(layer3, weights_out) + biases_out
y = layer_out
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
TRAIN_DATA_SIZZE / BATCH_SIZE, LEARNING_RATE_DECAY,
staircase=True)
# Optimizer
# GradientDescentOptimizer
# AdagradOptimizer
# AdagradDAOptimizer
# MomentumOptimizer
# AdamOptimizer
# FtrlOptimizer
# RMSPropOptimizer
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variables_averages_op]):
train_op = tf.no_op(name='train')
saver = tf.train.Saver()
with tf.Session() as sess:
writer = tf.summary.FileWriter("logs/", sess.graph)
tf.global_variables_initializer().run()
plt.ion() # 开启一个画图的窗口
for step_count in range(TRAINING_STEPS):
xs = get_next_batch(train_data,BATCH_SIZE,step_count)
ys = get_next_batch(train_labels,BATCH_SIZE,step_count)
xt = test_data
yt = test_labels
_, loss_value, step ,np_output_labels= sess.run([train_op, loss, global_step, y], feed_dict={x: xs, y_: ys})
np_test_output_labels,_ = sess.run([y,loss], feed_dict={x: xt, y_: yt})
output_labels_size,_ = np_output_labels.shape #计算训练集的错误率
result = np.zeros((output_labels_size))
result_train = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_output_labels[i])
result_train[i] = maxarg
np_output_labels[i] = 0
np_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_output_labels[i] == ys[i]).any())
#pass
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
train_labels_test = np.zeros((ys.shape[0]))
for i in range(ys.shape[0]):
train_labels_test[i] = np.argmax(ys[i]) + 1
score_train = ((train_labels_test - result_train)**2).sum()/result_train.shape[0]
ay4.append(score_train/20)
print("loss :",loss_value)
print("训练集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0],"score :",score_train)
ay2.append(right_sum/result.shape[0]/2) # 添加 i 的平方到 y 轴的数据中
output_labels_size,_ = np_test_output_labels.shape #计算测试集的错误率
result = np.zeros((output_labels_size))
# print(np.argmax(np_test_output_labels))
result_test = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_test_output_labels[i])
result_test[i] = maxarg
np_test_output_labels[i] = 0
np_test_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_test_output_labels[i] == yt[i]).any())
pass
score = ((labels_test_src.values - result_test)**2).sum()/result_test.shape[0]
ay3.append(score/20)
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
print("测试集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0])
ax.append(step_count) # 添加 i 到 x 轴的数据中
ay.append(right_sum/result.shape[0]) # 添加 i 的平方到 y 轴的数据中
plt.clf() # 清除之前画的图
plt.plot(ax,ay4,color = 'black') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay3,color = 'green') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay,color = 'red') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay2) # 画出当前 ax 列表和 ay 列表中的值的图形
plt.pause(0.01) # 暂停一秒
saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
plt.ioff() # 关闭画图的窗口
| e.csv",encodin | identifier_name |
feature-select.py | import os
import sys
import pandas as pd
import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.svm import SVC
from sklearn import preprocessing
# from sklearn.externals import joblib
import seaborn
import matplotlib.pyplot as plt
import tensorflow as tf
def stop():
    """Spin forever — crude debugging aid that halts the script at a checkpoint."""
    while True:
        pass
def where_is_nan(data):
    # Report where NaNs occur in a pandas DataFrame.
    # For every column containing NaN, prints the column name, the NaN
    # count and the row positions; prints "have no nan" when clean.
    temp = 0  # flag: becomes 1 once any NaN-bearing column is found
    # Line number of the call site, so the report can be matched to the caller.
    print(sys._getframe().f_back.f_lineno)
    for columname in data:
        if(np.isnan(data.loc[:,columname]).any()):
            temp = 1
            print(columname)
            print("\tsum = ",np.isnan(data.loc[:,columname]).sum(),"\n\tpos = ",np.where(np.isnan(data.loc[:,columname]) == True)[0])
    if temp == 0 :
        print("have no nan\n")
def drop_index_rule(data_src, rule):
    """Drop, in place, every row of ``data_src`` selected by boolean mask ``rule``."""
    rows_to_drop = data_src.loc[rule, :].index.values
    data_src.drop(index=rows_to_drop, inplace=True)
def data_classify(data_src, label_name, data_train_proportion):
    """Split an unordered DataFrame into per-class train/test DataFrames.

    Groups ``data_src`` by the values of column ``label_name`` (classes
    processed in ascending order) and splits each class into a train part
    and a test part.

    Args:
        data_src: Source DataFrame holding both features and the label column.
        label_name: Name of the column used to group rows into classes.
        data_train_proportion: Fraction in [0, 1] of each class used for training.

    Returns:
        (train_list, test_list): two lists of DataFrames, one entry per class,
        ordered by ascending class value, each with a fresh 0..n-1 index.
    """
    labels_classes = np.sort(data_src.loc[:, label_name].unique())
    temp_train = []
    temp_test = []
    for lab in labels_classes:
        data_classed = data_src.loc[data_src[label_name] == lab, :].reset_index(drop=True)
        row_num = data_classed.shape[0]
        # Use an integer cut point with positional (iloc) slicing. The original
        # sliced .loc with a FLOAT label, which (a) duplicated the boundary row
        # into both train and test (loc slices are end-inclusive) and (b) raises
        # on modern pandas for float labels over an integer index.
        train_row_num = int(row_num * data_train_proportion)
        train_data = data_classed.iloc[:train_row_num].reset_index(drop=True)
        test_data = data_classed.iloc[train_row_num:].reset_index(drop=True)
        temp_train.append(train_data)
        temp_test.append(test_data)
    return temp_train, temp_test
def data_expand(data_list, row_expand_aim_num):
    """Oversample each DataFrame in ``data_list`` in place (class balancing).

    Any frame shorter than ``row_expand_aim_num`` rows is grown to exactly
    that many rows by sampling its own rows with replacement; frames already
    long enough are left untouched.
    """
    for idx, frame in enumerate(data_list):
        missing = row_expand_aim_num - frame.shape[0]
        if missing <= 0:
            continue
        extra = frame.sample(n=missing, replace=True)
        grown = pd.concat([frame, extra], axis=0)
        data_list[idx] = grown.reset_index(drop=True)
def pack_data_list(data_list):
    """Concatenate a list of DataFrames into one, with a fresh 0..n-1 index."""
    packed = data_list[0]
    for part in data_list[1:]:
        packed = pd.concat([packed, part], axis=0)
    return packed.reset_index(drop=True)
def get_next_batch(all_data, batch_size, step):
    """Return the mini-batch for training step ``step``.

    Batches are taken in order and wrap around once the data is exhausted;
    the trailing partial batch (if any) is served as its own batch.

    Args:
        all_data: Row-sliceable data (numpy array or DataFrame).
        batch_size: Number of rows per batch.
        step: Global step counter; maps onto a batch index modulo the
            number of batches.

    Returns:
        The rows ``all_data[begin:end]`` for the selected batch.
    """
    row_num = all_data.shape[0]
    # Ceil division keeps everything in integers. The original computed a
    # FLOAT batch count and took `step % float`, which produced misaligned
    # slices (e.g. begin not a multiple of batch_size) once step exceeded
    # the fractional batch count.
    num_batches = max(1, -(-row_num // batch_size))
    batch_count = step % num_batches
    begin = batch_count * batch_size
    end = min(begin + batch_size, row_num)
    return all_data[begin:end]
base_dataset = pd.read_csv("./src-data/happiness_train_complete.csv",encoding='ISO-8859-1')
drop_list = [] #去除字符串形式的数据
for col in base_dataset:
if base_dataset.loc[:,col].dtype == 'object':
drop_list.append(col)
base_dataset.drop(drop_list,axis=1,inplace=True)
drop_index_rule(base_dataset, np.isnan(base_dataset.family_income)) #去除缺省行
base_dataset.drop(base_dataset.loc[:,np.isnan(base_dataset).any()].columns,axis = 1,inplace = True) #删除所有有缺省的列
base_dataset = base_dataset.reset_index() #重设索引
base_dataset.drop(['index'],axis=1,inplace=True) #去除多余索引
base_labelset = base_dataset['happiness']
base_dataset.drop(['happiness'],axis=1,inplace=True)
base_dataset.drop(['id'],axis=1,inplace=True)
base_dataset = base_dataset.loc[:,base_dataset.var() > 0.9] #提取方差大于1的特征
base_dataset = pd.concat( [base_labelset, base_dataset], axis = 1) #把标签和数据拼上
drop_index_rule(base_dataset,base_dataset.happiness<1) #幸福感有问题的行删掉
train_data, test_data = data_classify(base_dataset, 'happiness', 0.8)
data_expand(train_data, 2000) #由于样本严重不平衡 这里先进行样本扩增 每一个happiness类都扩到2500个样本 用复制的方法扩增
data_expand(test_data, 1000)
train_data = pack_data_list(train_data) #把数据分成了几类的list给打包到一起
test_data = pack_data_list(test_data)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sample(frac=1).reset_index(drop=True) #打乱样本顺序
test_data = test_data.sample(frac=1).reset_index(drop=True)
labels_train = train_data['happiness']
labels_test = test_data['happiness']
labels_test_src = labels_test
labels_train_src = labels_train
train_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
test_data.drop(['happiness'],axis=1,inplace=True) #去除缺省且不必要的列
train_data.head().to_csv("./temp-data/colname.csv",index = False)
np_src_happiness = labels_train.values #获取原始的幸福感数据 numpy格式 #训练labels
np_happiness_train_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_train_labels[i,np_src_happiness[i]-1] = 1
np_src_happiness = labels_test.values #获取原始的幸福感数据 numpy格式 #测试labels
np_happiness_test_labels = np.zeros((np_src_happiness.shape[0],5),dtype = np.int) #创建可以用于训练的np 标签 5分类问题
for i in range(np_src_happiness.shape[0]):
np_happiness_test_labels[i,np_src_happiness[i]-1] = 1
train_data = preprocessing.scale(train_data) #数据标准化 使数据的每列基本满足正态分布
test_data = preprocessing.scale(test_data)
train_labels = np_happiness_train_labels
test_labels = np_happiness_test_labels
# print(labels_test_src,test_data)
print(train_data.shape,test_data.shape)
print(train_labels.shape,test_labels.shape)
# train_data = preprocessing.scale(base_dataset)
# stop()
ax = [] # 定义一个 x 轴的空列表用来接收动态的数据
ay = [] # 定义一个 y 轴的空列表用来接收动态的数据
ay2 = []
ay3 = []
ay4 = []
TRAIN_DATA_SIZZE = train_data.shape[0]
BATCH_SIZE = 5000
LEARNING_RATE_BASE = 0.9
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0009
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="modules/"
MODEL_NAME="mnist_model"
TRAINING_STEPS = 9000000000
input_num = train_data.shape[1]
layer_node_num = [400, 200, 100]
output_num = 5
def get_weight_variable(name, shape, regularizer):
weights = tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
if regularizer != None: tf.add_to_collection('losses', regularizer(weights))
return weights
#定义两层简单的网络
x = tf.placeholder(tf.float32, [None, input_num], name='x-input')
y_ = tf.placeholder(tf.float32, [None, output_num], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
weights1 = get_weight_variable("weights1",[input_num, layer_node_num[0]], regularizer)
biases1 = tf.get_variable("biases1", [layer_node_num[0]], initializer=tf.constant_initializer(0.0))
layer1 = tf.nn.relu(tf.matmul(x, weights1) + biases1)
weights2 = get_weight_variable("weights2", [layer_node_num[0], layer_node_num[1]],regularizer)
biases2 = tf.get_variable("biases2", [layer_node_num[1]], initializer=tf.constant_initializer(0.0))
layer2 = tf.nn.relu(tf.matmul(layer1, weights2) + biases2)
weights3 = get_weight_variable("weights3", [layer_node_num[1], layer_node_num[2]],regularizer)
biases3 = tf.get_variable("biases3", [layer_node_num[2]], initializer=tf.constant_initializer(0.0))
layer3 = tf.nn.tanh(tf.matmul(layer2, weights3) + biases3)
weights_out = get_weight_variable("weights_out",[layer_node_num[2], output_num], regularizer)
biases_out = tf.get_variable("biases_out", [output_num], initializer=tf.constant_initializer(0.0))
layer_out = tf.matmul(layer3, weights_out) + biases_out
y = layer_out
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
TRAIN_DATA_SIZZE / BATCH_SIZE, LEARNING_RATE_DECAY,
staircase=True)
# Optimizer
# GradientDescentOptimizer
# AdagradOptimizer
# AdagradDAOptimizer
# MomentumOptimizer
# AdamOptimizer
# FtrlOptimizer
# RMSPropOptimizer
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variables_averages_op]):
train_op = tf.no_op(name='train')
saver = tf.train.Saver()
with tf.Session() as sess:
writer = tf.summary.FileWriter("logs/", sess.graph)
tf.global_variables_initializer().run()
plt.ion() # 开启一个画图的窗口
for step_count in range(TRAINING_STEPS):
xs = get_next_batch(train_data,BATCH_SIZE,step_count)
ys = get_next_batch(train_labels,BATCH_SIZE,step_count)
xt = test_data
yt = test_labels
_, loss_value, step ,np_output_labels= sess.run([train_op, loss, global_step, y], feed_dict={x: xs, y_: ys})
np_test_output_labels,_ = sess.run([y,loss], feed_dict={x: xt, y_: yt})
output_labels_size,_ = np_output_labels.shape #计算训练集的错误率
result = np.zeros((output_labels_size))
result_train = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_output_labels[i])
result_train[i] = maxarg
np_output_labels[i] = 0
np_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_output_labels[i] == ys[i]).any())
#pass
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
train_labels_test = np.zeros((ys.shape[0]))
for i in range(ys.shape[0]):
train_labels_test[i] = np.argmax(ys[i]) + 1
score_train = ((train_labels_test - result_train)**2).sum()/result_train.shape[0]
ay4.append(score_train/20)
print("loss :",loss_value)
print("训练集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0],"score :",score_train)
ay2.append(right_sum/result.shape[0]/2) # 添加 i 的平方到 y 轴的数据中
output_labels_size,_ = np_test_output_labels.shape #计算测试集的错误率
result = np.zeros((output_labels_size))
# print(np.argmax(np_test_output_labels))
result_test = np.zeros((output_labels_size))
for i in range(output_labels_size):
maxarg = np.argmax(np_test_output_labels[i])
result_test[i] = maxarg
np_test_output_labels[i] = 0
np_test_output_labels[i][maxarg] = 1
result[i] = np.logical_not(np.logical_not(np_test_output_labels[i] == yt[i]).any())
pass
score = ((labels_test_src.values - result_test)**2).sum()/result_test.shape[0]
ay3.append(score/20)
right_sum = result.sum()
err_sum = np.logical_not(result).sum()
print("测试集:\n","错误率:",err_sum/result.shape[0]," 正确率:",right_sum/result.shape[0])
ax.append(step_count) # 添加 i 到 x 轴的数据中
ay.append(right_sum/result.shape[0]) # 添加 i 的平方到 y 轴的数据中
plt.clf() # 清除之前画的图
plt.plot(ax,ay4,color = 'black') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay3,color = 'green') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay,color = 'red') # 画出当前 ax 列表和 ay 列表中的值的图形
plt.plot(ax,ay2) # 画出当前 ax 列表和 ay 列表中的值的图形
plt.pause(0.01) # 暂停一秒
saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
plt.ioff() # 关闭画图的窗口 | # AdamOptimizer
# FtrlOptimizer
# RMSPropOptimizer
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
| random_line_split |
misc.py | import copy
import os
import re
import time
from contextlib import contextmanager
from typing import *
import numpy as np
from heapdict import heapdict
__all__ = [
'PatternType',
'Singleton', 'NOT_SET',
'format_duration', 'ETA', 'minibatch_slices_iterator',
'optional_apply', 'validate_enum_arg',
'maybe_close', 'iter_files',
'InheritanceDict', 'CachedInheritanceDict',
'parse_tags', 'deep_copy',
]
PatternType = type(re.compile('x'))
class Singleton(object):
    """
    Base class for singleton classes.

    Each concrete subclass gets exactly one instance, created on first call
    and returned for every later call.

    >>> class Parent(Singleton):
    ...     pass
    >>> class Child(Parent):
    ...     pass
    >>> Parent() is Parent()
    True
    >>> Child() is Child()
    True
    >>> Parent() is not Child()
    True
    """

    __instances_dict = {}

    def __new__(cls, *args, **kwargs):
        # One cached instance per concrete class (not per hierarchy).
        registry = Singleton.__instances_dict
        if cls not in registry:
            registry[cls] = object.__new__(cls, *args, **kwargs)
        return registry[cls]
class NotSet(Singleton):
    """
    Class of the `NOT_SET` sentinel constant, used to distinguish
    "argument not given" from an explicit ``None``.

    >>> NOT_SET is not None
    True
    >>> NOT_SET
    NOT_SET
    >>> NotSet() is NOT_SET
    True
    >>> NotSet() == NOT_SET
    True
    """

    def __repr__(self):
        return 'NOT_SET'


NOT_SET = NotSet()
def format_duration(seconds: Union[float, int],
                    short_units: bool = True,
                    keep_zeros: bool = False):
    """
    Render a time duration as human readable text.

    >>> format_duration(0)
    '0s'
    >>> format_duration(61)
    '1m 1s'
    >>> format_duration(86400 * 2 + 60)
    '2d 1m'
    >>> format_duration(86400 * 2 + 60, keep_zeros=True)
    '2d 0h 1m 0s'
    >>> format_duration(86400 * 2 + 60, short_units=False)
    '2 days 1 minute'
    >>> format_duration(-1)
    '1s ago'

    Args:
        seconds: Duration in seconds; negative values render as "... ago".
        short_units: Use "d"/"h"/"m"/"s" instead of "day"/"hour"/"minute"/"second".
        keep_zeros: Keep zero-valued middle components (e.g. "1d 0h 0m 3s").

    Returns:
        str: The formatted time duration.
    """
    if short_units:
        units = [(86400, 'd', 'd'), (3600, 'h', 'h'),
                 (60, 'm', 'm'), (1, 's', 's')]
    else:
        units = [(86400, ' day', ' days'), (3600, ' hour', ' hours'),
                 (60, ' minute', ' minutes'), (1, ' second', ' seconds')]

    suffix = ''
    if seconds < 0:
        seconds, suffix = -seconds, ' ago'

    pieces = []
    for uvalue, singular, plural in units[:-1]:
        if seconds >= uvalue:
            count = int(seconds // uvalue)
            pieces.append(f'{count:d}{plural if count > 1 else singular}')
            seconds %= uvalue
        elif keep_zeros and pieces:
            # Zero middle component: only kept when requested and when a
            # larger unit has already been emitted.
            pieces.append(f'0{singular}')

    # Seconds component: fractional remainders use up to 4 significant digits.
    singular, plural = units[-1][1:]
    if seconds > np.finfo(np.float64).eps:
        pieces.append(f'{seconds:.4g}{plural if seconds > 1 else singular}')
    elif not pieces or keep_zeros:
        pieces.append(f'0{singular}')

    return ' '.join(pieces) + suffix
class ETA(object):
    """
    Helper for computing the Estimated Time Ahead (ETA).

    >>> now = time.time()
    >>> eta = ETA()
    >>> eta.take_snapshot(progress=0.0, now=now)  # record the start time
    >>> eta.get_eta(progress=0.01, now=now + 5.)  # i.e., 1% work costs 5s
    495.0
    """

    def __init__(self):
        """Construct a new :class:`ETA`."""
        # Parallel lists of snapshot timestamps and their progress values.
        self._times = []
        self._progresses = []

    def take_snapshot(self, progress: Union[int, float],
                      now: Optional[Union[int, float]] = None):
        """
        Record a ``(progress, now)`` snapshot for later ETA computation.

        Snapshots are only kept when progress advanced by more than 0.1%
        since the last one, to bound memory use.

        Args:
            progress: The current progress, range in ``[0, 1]``.
            now: The current timestamp in seconds; defaults to ``time.time()``.
        """
        if self._progresses and progress - self._progresses[-1] <= .001:
            return
        if now is None:
            now = time.time()
        self._progresses.append(progress)
        self._times.append(now)

    def get_eta(self,
                progress: Union[int, float],
                now: Optional[Union[int, float]] = None,
                take_snapshot: bool = True) -> Optional[float]:
        """
        Estimate the remaining time in seconds by linear extrapolation from
        the FIRST recorded snapshot.

        Args:
            progress: The current progress, range in ``[0, 1]``.
            now: The current timestamp in seconds; defaults to ``time.time()``.
            take_snapshot: Whether to also record ``(progress, now)``.

        Returns:
            The ETA in seconds, or :obj:`None` when no snapshot exists yet or
            progress has not moved measurably since the first snapshot.
        """
        if now is None:
            now = time.time()
        if not self._progresses:
            eta = None
        else:
            progress_delta = progress - self._progresses[0]
            if progress_delta < 1e-7:
                # Too little movement to extrapolate. NOTE: no snapshot is
                # taken on this early-exit path (matches original behavior).
                return None
            elapsed = now - self._times[0]
            eta = elapsed / progress_delta * (1. - progress)
        if take_snapshot:
            self.take_snapshot(progress, now)
        return eta
def minibatch_slices_iterator(length: int,
                              batch_size: int,
                              skip_incomplete: bool = False
                              ) -> Generator[slice, None, None]:
    """
    Iterate through all the mini-batch slices.

    >>> arr = np.arange(10)
    >>> for batch_s in minibatch_slices_iterator(len(arr), batch_size=4):
    ...     print(arr[batch_s])
    [0 1 2 3]
    [4 5 6 7]
    [8 9]
    >>> for batch_s in minibatch_slices_iterator(
    ...         len(arr), batch_size=4, skip_incomplete=True):
    ...     print(arr[batch_s])
    [0 1 2 3]
    [4 5 6 7]

    Args:
        length: Total length of data in an epoch.
        batch_size: Size of each mini-batch.
        skip_incomplete: If :obj:`True`, discard the final batch if it
            contains less than `batch_size` number of items.

    Yields:
        Slices of each mini-batch. The last mini-batch may contain less
        elements than `batch_size`.
    """
    # The loop body was missing in this copy (FIM hole); reconstructed from
    # the visible completion of the same function elsewhere in the file.
    start = 0
    stop1 = (length // batch_size) * batch_size
    # Full-sized batches first ...
    while start < stop1:
        yield slice(start, start + batch_size, 1)
        start += batch_size
    # ... then the trailing partial batch, unless skipped.
    if not skip_incomplete and start < length:
        yield slice(start, length, 1)
def optional_apply(f, value):
    """
    Apply ``f`` to ``value`` unless the value is None.

    >>> optional_apply(int, None) is None
    True
    >>> optional_apply(int, '123')
    123

    Args:
        f: The function to apply on `value`.
        value: The value, maybe None.
    """
    return None if value is None else f(value)
TArgValue = TypeVar('TArgValue')


def validate_enum_arg(arg_name: str,
                      arg_value: Optional[TArgValue],
                      choices: Iterable[TArgValue],
                      nullable: bool = False) -> Optional[TArgValue]:
    """
    Validate the value of an enumeration argument.

    Args:
        arg_name: Name of the argument (used in the error message).
        arg_value: Value of the argument.
        choices: Valid choices of the argument value.
        nullable: Whether the argument may also be None.

    Returns:
        The validated argument value, unchanged.

    Raises:
        ValueError: If `arg_value` is not one of `choices` (and not an
            allowed None).
    """
    choices = tuple(choices)
    is_allowed_none = nullable and arg_value is None
    if not is_allowed_none and arg_value not in choices:
        raise ValueError('Invalid value for argument `{}`: expected to be one '
                         'of {!r}, but got {!r}.'.
                         format(arg_name, choices, arg_value))
    return arg_value
@contextmanager
def maybe_close(obj):
    """
    Context manager yielding ``obj``; on exit, calls ``obj.close()`` when the
    object provides one, and does nothing otherwise.

    >>> class HasClose(object):
    ...     def close(self):
    ...         print('closed')
    >>> class HasNotClose(object):
    ...     pass
    >>> with maybe_close(HasClose()) as obj:  # doctest: +ELLIPSIS
    ...     print(obj)
    <mltk.utils.misc.HasClose ...>
    closed
    >>> with maybe_close(HasNotClose()) as obj:  # doctest: +ELLIPSIS
    ...     print(obj)
    <mltk.utils.misc.HasNotClose ...>

    Args:
        obj: The object maybe to close.

    Yields:
        The specified `obj`.
    """
    try:
        yield obj
    finally:
        # Close even when the body raised; objects without close() are fine.
        if hasattr(obj, 'close'):
            obj.close()
def iter_files(root_dir: str, sep: str = '/') -> Generator[str, None, None]:
    """
    Yield the relative path of every file under `root_dir`, recursively.
    Directories themselves are not yielded.

    Args:
        root_dir: The root directory, from which to iterate.
        sep: Separator used to join path components in the yielded
            relative paths.

    Yields:
        The relative paths of each file, in ``os.listdir`` order.
    """
    def walk(abs_path, rel_name):
        # Depth-first recursion mirroring the top-level loop below.
        for entry in os.listdir(abs_path):
            entry_abs = abs_path + os.sep + entry
            entry_rel = rel_name + sep + entry
            if os.path.isdir(entry_abs):
                yield from walk(entry_abs, entry_rel)
            else:
                yield entry_rel

    for entry in os.listdir(root_dir):
        entry_abs = root_dir + os.sep + entry
        if os.path.isdir(entry_abs):
            yield from walk(entry_abs, entry)
        else:
            yield entry
TValue = TypeVar('TValue')
class _InheritanceNode(object):
    # A node of the type-inheritance graph used by InheritanceDict:
    # wraps one registered type and links to the nodes of its subtypes.

    def __init__(self, type_: type):
        self.type = type_  # the wrapped type
        self.children = []  # nodes whose .type is a subclass of this .type

    def add_child(self, child: '_InheritanceNode'):
        # Record `child` as a direct descendant in the graph.
        self.children.append(child)
class InheritanceDict(Generic[TValue]):
    """
    A dict that gives the registered value of the closest known ancestor
    of a query type (`ancestor` includes the type itself).

    >>> class GrandPa(object): pass
    >>> class Parent(GrandPa): pass
    >>> class Child(Parent): pass
    >>> class Uncle(GrandPa): pass
    >>> d = InheritanceDict()
    >>> d[Child] = 1
    >>> d[GrandPa] = 2
    >>> d[Uncle] = 3
    >>> d[GrandPa]
    2
    >>> d[Parent]
    2
    >>> d[Child]
    1
    >>> d[Uncle]
    3
    >>> d[str]
    Traceback (most recent call last):
        ...
    KeyError: <class 'str'>
    """

    def __init__(self):
        self._nodes = []  # type: List[_InheritanceNode]
        # type -> registered value
        self._values = {}
        # Cached topological order of the registered types, computed lazily
        # by `_topo_sort`; set to None whenever a new type is registered.
        self._topo_sorted = None

    def __setitem__(self, type_: type, value: TValue):
        # Register `value` for `type_`, inserting `type_` into the
        # inheritance graph if it is new.
        this_node = _InheritanceNode(type_)
        if type_ not in self._values:
            # Wire the new node to every related node: ancestors take it as
            # a child, descendants become its children.
            for node in self._nodes:
                if issubclass(type_, node.type):
                    node.add_child(this_node)
                elif issubclass(node.type, type_):
                    this_node.add_child(node)
            self._nodes.append(this_node)
            self._topo_sorted = None  # stale: recompute on next lookup
        self._values[type_] = value

    def __getitem__(self, type_: type) -> TValue:
        # Walk the topological order from most-derived to most-base and
        # return the value of the first type `type_` is a subclass of.
        if self._topo_sorted is None:
            self._topo_sort()
        for t in reversed(self._topo_sorted):
            if t is type_ or issubclass(type_, t):
                return self._values[t]
        raise KeyError(type_)

    def _topo_sort(self):
        # Kahn-style topological sort driven by a priority heap (heapdict):
        # repeatedly pop the node with the fewest unprocessed parents.
        parent_count = {node: 0 for node in self._nodes}
        for node in self._nodes:
            for child in node.children:
                parent_count[child] += 1
        heap = heapdict()
        for node, pa_count in parent_count.items():
            heap[node] = pa_count
        topo_sorted = []
        while heap:
            node, priority = heap.popitem()
            topo_sorted.append(node.type)
            for child in node.children:
                heap[child] -= 1
        self._topo_sorted = topo_sorted
class CachedInheritanceDict(InheritanceDict[TValue]):
    """
    An :class:`InheritanceDict` with an additional lookup cache.

    The cache never evicts, so this class only suits workloads where the
    number of distinct queried types stays small.
    """

    NOT_EXIST = ...  # sentinel cached for types with no registered ancestor

    def __init__(self):
        super().__init__()
        self._cache = {}  # type: Dict[type, TValue]

    def _topo_sort(self):
        # A re-sort means registrations changed: every cached lookup is stale.
        self._cache.clear()
        super()._topo_sort()

    def __getitem__(self, type_: type) -> TValue:
        cached = self._cache.get(type_, None)
        if cached is self.NOT_EXIST:
            raise KeyError(type_)
        if cached is not None:
            return cached
        try:
            value = super().__getitem__(type_)
        except KeyError:
            self._cache[type_] = self.NOT_EXIST
            raise
        self._cache[type_] = value
        return value

    def __setitem__(self, type_: type, value: TValue):
        # Any registration may change the best ancestor of cached types.
        self._cache.clear()
        super().__setitem__(type_, value)
def parse_tags(s: str) -> List[str]:
    """
    Parse a comma separated tags str into a list of tags.

    >>> parse_tags('one tag')
    ['one tag']
    >>> parse_tags('  strip left and right ends  ')
    ['strip left and right ends']
    >>> parse_tags('two, tags')
    ['two', 'tags']
    >>> parse_tags('"quoted, string" is one tag')
    ['quoted, string is one tag']
    >>> parse_tags(', empty tags, , will be skipped, ')
    ['empty tags', 'will be skipped']

    Args:
        s: The comma separated tags str.

    Returns:
        The parsed tags.
    """
    def flush(buf, out):
        # Emit the buffered characters as one tag, if non-blank after strip.
        tag = ''.join(buf).strip()
        if tag:
            out.append(tag)
        buf.clear()

    tags = []
    buf = []
    quote_char = None  # the opening quote we are inside, or None
    for ch in s:
        if quote_char is not None:
            # Inside quotes: commas are literal; matching quote closes.
            if ch == quote_char:
                quote_char = None
            else:
                buf.append(ch)
        elif ch in ('"', "'"):
            quote_char = ch
        elif ch == ',':
            if buf:
                flush(buf, tags)
        else:
            buf.append(ch)
    if buf:
        flush(buf, tags)
    return tags
TValue = TypeVar('TValue')


def deep_copy(value: TValue) -> TValue:
    """
    A patched deep copy function, that can handle various types cannot be
    handled by the standard :func:`copy.deepcopy`.

    Args:
        value: The value to be copied.

    Returns:
        The copied value.
    """
    def pattern_dispatcher(v, memo=None):
        return v  # we don't need to copy a regex pattern object, it's read-only

    # Temporarily register a dispatcher so deepcopy treats compiled regex
    # patterns as atomic; the previous dispatcher (if any) is restored in
    # the finally block. NOTE(review): relies on the private
    # copy._deepcopy_dispatch table — confirm against the stdlib version.
    old_dispatcher = copy._deepcopy_dispatch.get(PatternType, None)
    copy._deepcopy_dispatch[PatternType] = pattern_dispatcher
    try:
        return copy.deepcopy(value)
    finally:
        if old_dispatcher is not None:  # pragma: no cover
            copy._deepcopy_dispatch[PatternType] = old_dispatcher
        else:
            del copy._deepcopy_dispatch[PatternType]
| yield slice(start, start + batch_size, 1)
start += batch_size | conditional_block |
misc.py | import copy
import os
import re
import time
from contextlib import contextmanager
from typing import *
import numpy as np
from heapdict import heapdict
__all__ = [
'PatternType',
'Singleton', 'NOT_SET',
'format_duration', 'ETA', 'minibatch_slices_iterator',
'optional_apply', 'validate_enum_arg',
'maybe_close', 'iter_files',
'InheritanceDict', 'CachedInheritanceDict',
'parse_tags', 'deep_copy',
]
PatternType = type(re.compile('x'))
class Singleton(object):
    """
    Base class for singleton classes.

    Each concrete subclass gets exactly one instance, created on first call
    and returned for every later call.

    >>> class Parent(Singleton):
    ...     pass
    >>> class Child(Parent):
    ...     pass
    >>> Parent() is Parent()
    True
    >>> Child() is Child()
    True
    >>> Parent() is not Child()
    True
    """

    __instances_dict = {}

    def __new__(cls, *args, **kwargs):
        # One cached instance per concrete class (not per hierarchy).
        registry = Singleton.__instances_dict
        if cls not in registry:
            registry[cls] = object.__new__(cls, *args, **kwargs)
        return registry[cls]
class NotSet(Singleton):
    """
    Class of the `NOT_SET` sentinel constant, used to distinguish
    "argument not given" from an explicit ``None``.

    >>> NOT_SET is not None
    True
    >>> NOT_SET
    NOT_SET
    >>> NotSet() is NOT_SET
    True
    >>> NotSet() == NOT_SET
    True
    """

    def __repr__(self):
        return 'NOT_SET'


NOT_SET = NotSet()
def format_duration(seconds: Union[float, int],
                    short_units: bool = True,
                    keep_zeros: bool = False):
    """
    Render a time duration as human readable text.

    >>> format_duration(0)
    '0s'
    >>> format_duration(61)
    '1m 1s'
    >>> format_duration(86400 * 2 + 60)
    '2d 1m'
    >>> format_duration(86400 * 2 + 60, keep_zeros=True)
    '2d 0h 1m 0s'
    >>> format_duration(86400 * 2 + 60, short_units=False)
    '2 days 1 minute'
    >>> format_duration(-1)
    '1s ago'

    Args:
        seconds: Duration in seconds; negative values render as "... ago".
        short_units: Use "d"/"h"/"m"/"s" instead of "day"/"hour"/"minute"/"second".
        keep_zeros: Keep zero-valued middle components (e.g. "1d 0h 0m 3s").

    Returns:
        str: The formatted time duration.
    """
    if short_units:
        units = [(86400, 'd', 'd'), (3600, 'h', 'h'),
                 (60, 'm', 'm'), (1, 's', 's')]
    else:
        units = [(86400, ' day', ' days'), (3600, ' hour', ' hours'),
                 (60, ' minute', ' minutes'), (1, ' second', ' seconds')]

    suffix = ''
    if seconds < 0:
        seconds, suffix = -seconds, ' ago'

    pieces = []
    for uvalue, singular, plural in units[:-1]:
        if seconds >= uvalue:
            count = int(seconds // uvalue)
            pieces.append(f'{count:d}{plural if count > 1 else singular}')
            seconds %= uvalue
        elif keep_zeros and pieces:
            # Zero middle component: only kept when requested and when a
            # larger unit has already been emitted.
            pieces.append(f'0{singular}')

    # Seconds component: fractional remainders use up to 4 significant digits.
    singular, plural = units[-1][1:]
    if seconds > np.finfo(np.float64).eps:
        pieces.append(f'{seconds:.4g}{plural if seconds > 1 else singular}')
    elif not pieces or keep_zeros:
        pieces.append(f'0{singular}')

    return ' '.join(pieces) + suffix
class ETA(object):
    """
    Helper for computing the Estimated Time Ahead (ETA).

    >>> now = time.time()
    >>> eta = ETA()
    >>> eta.take_snapshot(progress=0.0, now=now)  # record the start time
    >>> eta.get_eta(progress=0.01, now=now + 5.)  # i.e., 1% work costs 5s
    495.0
    """

    def __init__(self):
        """Construct a new :class:`ETA`."""
        # Parallel lists of snapshot timestamps and their progress values.
        self._times = []
        self._progresses = []

    def take_snapshot(self, progress: Union[int, float],
                      now: Optional[Union[int, float]] = None):
        """
        Record a ``(progress, now)`` snapshot for later ETA computation.

        Snapshots are only kept when progress advanced by more than 0.1%
        since the last one, to bound memory use.

        Args:
            progress: The current progress, range in ``[0, 1]``.
            now: The current timestamp in seconds; defaults to ``time.time()``.
        """
        if self._progresses and progress - self._progresses[-1] <= .001:
            return
        if now is None:
            now = time.time()
        self._progresses.append(progress)
        self._times.append(now)

    def get_eta(self,
                progress: Union[int, float],
                now: Optional[Union[int, float]] = None,
                take_snapshot: bool = True) -> Optional[float]:
        """
        Estimate the remaining time in seconds by linear extrapolation from
        the FIRST recorded snapshot.

        Args:
            progress: The current progress, range in ``[0, 1]``.
            now: The current timestamp in seconds; defaults to ``time.time()``.
            take_snapshot: Whether to also record ``(progress, now)``.

        Returns:
            The ETA in seconds, or :obj:`None` when no snapshot exists yet or
            progress has not moved measurably since the first snapshot.
        """
        if now is None:
            now = time.time()
        if not self._progresses:
            eta = None
        else:
            progress_delta = progress - self._progresses[0]
            if progress_delta < 1e-7:
                # Too little movement to extrapolate. NOTE: no snapshot is
                # taken on this early-exit path (matches original behavior).
                return None
            elapsed = now - self._times[0]
            eta = elapsed / progress_delta * (1. - progress)
        if take_snapshot:
            self.take_snapshot(progress, now)
        return eta
def minibatch_slices_iterator(length: int,
                              batch_size: int,
                              skip_incomplete: bool = False
                              ) -> Generator[slice, None, None]:
    """
    Iterate through all the mini-batch slices.

    Args:
        length: Total length of data in an epoch.
        batch_size: Size of each mini-batch.
        skip_incomplete: If :obj:`True`, discard the final batch if it
            contains less than `batch_size` number of items.

    Yields:
        Slices of each mini-batch. The last mini-batch may contain less
        elements than `batch_size`.
    """
    # The body was missing in this copy (FIM hole); reconstructed from the
    # complete version of the same function earlier in the file.
    start = 0
    stop1 = (length // batch_size) * batch_size
    # Full-sized batches first ...
    while start < stop1:
        yield slice(start, start + batch_size, 1)
        start += batch_size
    # ... then the trailing partial batch, unless skipped.
    if not skip_incomplete and start < length:
        yield slice(start, length, 1)
def optional_apply(f, value):
    """
    Apply ``f`` to ``value`` unless the value is None.

    >>> optional_apply(int, None) is None
    True
    >>> optional_apply(int, '123')
    123

    Args:
        f: The function to apply on `value`.
        value: The value, maybe None.
    """
    return None if value is None else f(value)
TArgValue = TypeVar('TArgValue')


def validate_enum_arg(arg_name: str,
                      arg_value: Optional[TArgValue],
                      choices: Iterable[TArgValue],
                      nullable: bool = False) -> Optional[TArgValue]:
    """
    Validate the value of an enumeration argument.

    Args:
        arg_name: Name of the argument (used in the error message).
        arg_value: Value of the argument.
        choices: Valid choices of the argument value.
        nullable: Whether the argument may also be None.

    Returns:
        The validated argument value, unchanged.

    Raises:
        ValueError: If `arg_value` is not one of `choices` (and not an
            allowed None).
    """
    choices = tuple(choices)
    is_allowed_none = nullable and arg_value is None
    if not is_allowed_none and arg_value not in choices:
        raise ValueError('Invalid value for argument `{}`: expected to be one '
                         'of {!r}, but got {!r}.'.
                         format(arg_name, choices, arg_value))
    return arg_value
@contextmanager
def maybe_close(obj):
    """
    Context manager yielding ``obj``; on exit, calls ``obj.close()`` when the
    object provides one, and does nothing otherwise.

    Args:
        obj: The object maybe to close.

    Yields:
        The specified `obj`.
    """
    try:
        yield obj
    finally:
        # Close even when the body raised; objects without close() are fine.
        if hasattr(obj, 'close'):
            obj.close()
def iter_files(root_dir: str, sep: str = '/') -> Generator[str, None, None]:
    """
    Yield the relative path of every file under `root_dir`, recursively.
    Directories themselves are not yielded.

    Args:
        root_dir: The root directory, from which to iterate.
        sep: Separator used to join path components in the yielded
            relative paths.

    Yields:
        The relative paths of each file, in ``os.listdir`` order.
    """
    def walk(abs_path, rel_name):
        # Depth-first recursion mirroring the top-level loop below.
        for entry in os.listdir(abs_path):
            entry_abs = abs_path + os.sep + entry
            entry_rel = rel_name + sep + entry
            if os.path.isdir(entry_abs):
                yield from walk(entry_abs, entry_rel)
            else:
                yield entry_rel

    for entry in os.listdir(root_dir):
        entry_abs = root_dir + os.sep + entry
        if os.path.isdir(entry_abs):
            yield from walk(entry_abs, entry)
        else:
            yield entry
TValue = TypeVar('TValue')
class _InheritanceNode(object):
def __init__(self, type_: type):
self.type = type_
self.children = []
def add_child(self, child: '_InheritanceNode'):
self.children.append(child)
class InheritanceDict(Generic[TValue]):
"""
A dict that gives the registered value of the closest known ancestor
of a query type (`ancestor` includes the type itself).
>>> class GrandPa(object): pass
>>> class Parent(GrandPa): pass
>>> class Child(Parent): pass
>>> class Uncle(GrandPa): pass
>>> d = InheritanceDict()
>>> d[Child] = 1
>>> d[GrandPa] = 2
>>> d[Uncle] = 3
>>> d[GrandPa]
2
>>> d[Parent]
2
>>> d[Child]
1
>>> d[Uncle]
3
>>> d[str]
Traceback (most recent call last):
...
KeyError: <class 'str'>
"""
def __init__(self):
self._nodes = [] # type: List[_InheritanceNode]
self._values = {}
self._topo_sorted = None
def __setitem__(self, type_: type, value: TValue):
this_node = _InheritanceNode(type_)
if type_ not in self._values:
for node in self._nodes:
if issubclass(type_, node.type):
node.add_child(this_node)
elif issubclass(node.type, type_):
this_node.add_child(node)
self._nodes.append(this_node)
self._topo_sorted = None
self._values[type_] = value
def __getitem__(self, type_: type) -> TValue:
if self._topo_sorted is None:
self._topo_sort()
for t in reversed(self._topo_sorted):
if t is type_ or issubclass(type_, t):
return self._values[t]
raise KeyError(type_)
def _topo_sort(self):
parent_count = {node: 0 for node in self._nodes}
for node in self._nodes:
for child in node.children:
parent_count[child] += 1
heap = heapdict()
for node, pa_count in parent_count.items():
heap[node] = pa_count
topo_sorted = []
while heap:
node, priority = heap.popitem()
topo_sorted.append(node.type)
for child in node.children:
heap[child] -= 1
self._topo_sorted = topo_sorted
class CachedInheritanceDict(InheritanceDict[TValue]):
"""
A subclass of :class:`InheritanceDict`, with an additional lookup cache.
The cache is infinitely large, thus this class is only suitable under the
situation where the number of queried types are not too large.
"""
NOT_EXIST = ...
def __init__(self):
super().__init__()
self._cache = {} # type: Dict[type, TValue]
def _topo_sort(self):
self._cache.clear()
super()._topo_sort()
def __getitem__(self, type_: type) -> TValue:
ret = self._cache.get(type_, None)
if ret is None:
try:
ret = self._cache[type_] = super().__getitem__(type_)
except KeyError:
self._cache[type_] = self.NOT_EXIST
raise
elif ret is self.NOT_EXIST:
raise KeyError(type_)
return ret
def __setitem__(self, type_: type, value: TValue):
self._cache.clear()
super().__setitem__(type_, value)
def parse_tags(s: str) -> List[str]:
"""
Parse comma separated tags str into list of tags.
>>> parse_tags('one tag')
['one tag']
>>> parse_tags(' strip left and right ends ')
['strip left and right ends']
>>> parse_tags('two, tags')
['two', 'tags']
>>> parse_tags('"quoted, string" is one tag')
['quoted, string is one tag']
>>> parse_tags(', empty tags, , will be skipped, ')
['empty tags', 'will be skipped']
Args:
s: The comma separated tags str.
Returns:
The parsed tags.
"""
tags = []
buf = []
in_quoted = None
for c in s:
if in_quoted:
if c == in_quoted:
in_quoted = None
else:
buf.append(c)
elif c == '"' or c == '\'':
in_quoted = c
elif c == ',':
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
buf.clear()
else:
buf.append(c)
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
return tags
TValue = TypeVar('TValue')
def deep_copy(value: TValue) -> TValue:
"""
A patched deep copy function, that can handle various types cannot be
handled by the standard :func:`copy.deepcopy`.
Args:
value: The value to be copied.
Returns:
The copied value.
"""
def pattern_dispatcher(v, memo=None):
return v # we don't need to copy a regex pattern object, it's read-only
old_dispatcher = copy._deepcopy_dispatch.get(PatternType, None)
copy._deepcopy_dispatch[PatternType] = pattern_dispatcher
try:
return copy.deepcopy(value)
finally:
if old_dispatcher is not None: # pragma: no cover
copy._deepcopy_dispatch[PatternType] = old_dispatcher
else:
del copy._deepcopy_dispatch[PatternType]
| """
Iterate through all the mini-batch slices.
>>> arr = np.arange(10)
>>> for batch_s in minibatch_slices_iterator(len(arr), batch_size=4):
... print(arr[batch_s])
[0 1 2 3]
[4 5 6 7]
[8 9]
>>> for batch_s in minibatch_slices_iterator(
... len(arr), batch_size=4, skip_incomplete=True):
... print(arr[batch_s])
[0 1 2 3]
[4 5 6 7]
Args:
length: Total length of data in an epoch.
batch_size: Size of each mini-batch.
skip_incomplete: If :obj:`True`, discard the final batch if it
contains less than `batch_size` number of items.
Yields
Slices of each mini-batch. The last mini-batch may contain less
elements than `batch_size`.
"""
start = 0
stop1 = (length // batch_size) * batch_size
while start < stop1:
yield slice(start, start + batch_size, 1)
start += batch_size
if not skip_incomplete and start < length:
yield slice(start, length, 1) | identifier_body |
misc.py | import copy
import os
import re
import time
from contextlib import contextmanager
from typing import *
import numpy as np
from heapdict import heapdict
__all__ = [
'PatternType',
'Singleton', 'NOT_SET',
'format_duration', 'ETA', 'minibatch_slices_iterator',
'optional_apply', 'validate_enum_arg',
'maybe_close', 'iter_files',
'InheritanceDict', 'CachedInheritanceDict',
'parse_tags', 'deep_copy',
]
PatternType = type(re.compile('x'))
class Singleton(object):
"""
Base class for singleton classes.
>>> class Parent(Singleton):
... pass
>>> class Child(Parent):
... pass
>>> Parent() is Parent()
True
>>> Child() is Child()
True
>>> Parent() is not Child()
True
"""
__instances_dict = {}
def __new__(cls, *args, **kwargs):
if cls not in Singleton.__instances_dict:
Singleton.__instances_dict[cls] = \
object.__new__(cls, *args, **kwargs)
return Singleton.__instances_dict[cls]
class NotSet(Singleton):
"""
Class of the `NOT_SET` constant.
>>> NOT_SET is not None
True
>>> NOT_SET
NOT_SET
>>> NOT_SET == NOT_SET
True
>>> NotSet() is NOT_SET
True
>>> NotSet() == NOT_SET
True
"""
def __repr__(self):
return 'NOT_SET'
NOT_SET = NotSet()
def format_duration(seconds: Union[float, int],
short_units: bool = True,
keep_zeros: bool = False):
"""
Format specified time duration as human readable text.
>>> format_duration(0)
'0s'
>>> format_duration(61)
'1m 1s'
>>> format_duration(86400 * 2 + 60)
'2d 1m'
>>> format_duration(86400 * 2 + 60, keep_zeros=True)
'2d 0h 1m 0s'
>>> format_duration(86400 * 2 + 60, short_units=False)
'2 days 1 minute'
>>> format_duration(-1)
'1s ago'
Args:
seconds: Number of seconds of the time duration.
short_units: Whether or not to use short units ("d", "h", "m", "s")
instead of long units ("day", "hour", "minute", "second")?
keep_zeros: Whether or not to keep zero components?
(e.g., to keep "0h 0m" in "1d 0h 0m 3s").
Returns:
str: The formatted time duration.
"""
if short_units:
units = [(86400, 'd', 'd'), (3600, 'h', 'h'),
(60, 'm', 'm'), (1, 's', 's')]
else:
units = [(86400, ' day', ' days'), (3600, ' hour', ' hours'),
(60, ' minute', ' minutes'), (1, ' second', ' seconds')]
if seconds < 0:
seconds = -seconds
suffix = ' ago'
else:
suffix = ''
pieces = []
for uvalue, uname, uname_plural in units[:-1]:
if seconds >= uvalue:
val = int(seconds // uvalue)
pieces.append(f'{val:d}{uname_plural if val > 1 else uname}')
seconds %= uvalue
elif keep_zeros and pieces:
pieces.append(f'0{uname}')
uname, uname_plural = units[-1][1:]
if seconds > np.finfo(np.float64).eps:
pieces.append(f'{seconds:.4g}{uname_plural if seconds > 1 else uname}')
elif not pieces or keep_zeros:
pieces.append(f'0{uname}')
return ' '.join(pieces) + suffix
class ETA(object):
"""
Class to help compute the Estimated Time Ahead (ETA).
>>> now = time.time()
>>> eta = ETA()
>>> eta.take_snapshot(progress=0.0, now=now) # record the start time
>>> eta.get_eta(progress=0.01, now=now + 5.) # i.e., 1% work costs 5s
495.0
"""
def __init__(self):
"""Construct a new :class:`ETA`."""
self._times = []
self._progresses = []
def take_snapshot(self, progress: Union[int, float],
now: Optional[Union[int, float]] = None):
"""
Take a snapshot of ``(progress, now)``, for later computing ETA.
Args:
progress: The current progress, range in ``[0, 1]``.
now: The current timestamp in seconds. If not specified, use
``time.time()``.
"""
if not self._progresses or progress - self._progresses[-1] > .001:
# we only record the time and corresponding progress if the
# progress has been advanced by 0.1%
if now is None:
now = time.time()
self._progresses.append(progress)
self._times.append(now)
def get_eta(self,
progress: Union[int, float],
now: Optional[Union[int, float]] = None,
take_snapshot: bool = True) -> Optional[float]:
"""
Get the Estimated Time Ahead (ETA).
Args:
progress: The current progress, range in ``[0, 1]``.
now: The current timestamp in seconds. If not specified, use
``time.time()``.
take_snapshot: Whether or not to take a snapshot of
the specified ``(progress, now)``? (default :obj:`True`)
Returns:
The ETA in seconds, or :obj:`None` if the ETA cannot be estimated.
"""
# TODO: Maybe we can have a better estimation algorithm here!
if now is None:
now = time.time()
if self._progresses:
time_delta = now - self._times[0]
progress_delta = progress - self._progresses[0]
progress_left = 1. - progress
if progress_delta < 1e-7:
return None
eta = time_delta / progress_delta * progress_left
else:
eta = None
if take_snapshot:
self.take_snapshot(progress, now)
return eta
def minibatch_slices_iterator(length: int,
batch_size: int,
skip_incomplete: bool = False
) -> Generator[slice, None, None]:
"""
Iterate through all the mini-batch slices.
>>> arr = np.arange(10)
>>> for batch_s in minibatch_slices_iterator(len(arr), batch_size=4):
... print(arr[batch_s])
[0 1 2 3]
[4 5 6 7]
[8 9]
>>> for batch_s in minibatch_slices_iterator(
... len(arr), batch_size=4, skip_incomplete=True):
... print(arr[batch_s])
[0 1 2 3]
[4 5 6 7]
Args:
length: Total length of data in an epoch.
batch_size: Size of each mini-batch.
skip_incomplete: If :obj:`True`, discard the final batch if it
contains less than `batch_size` number of items.
Yields
Slices of each mini-batch. The last mini-batch may contain less
elements than `batch_size`.
"""
start = 0
stop1 = (length // batch_size) * batch_size
while start < stop1:
yield slice(start, start + batch_size, 1)
start += batch_size
if not skip_incomplete and start < length:
yield slice(start, length, 1)
def optional_apply(f, value):
"""
If `value` is not None, return `f(value)`, otherwise return None.
>>> optional_apply(int, None) is None
True
>>> optional_apply(int, '123')
123
Args:
f: The function to apply on `value`.
value: The value, maybe None.
"""
if value is not None:
return f(value)
TArgValue = TypeVar('TArgValue')
def validate_enum_arg(arg_name: str,
arg_value: Optional[TArgValue],
choices: Iterable[TArgValue],
nullable: bool = False) -> Optional[TArgValue]:
"""
Validate the value of an enumeration argument.
Args:
arg_name: Name of the argument.
arg_value: Value of the argument.
choices: Valid choices of the argument value.
nullable: Whether or not the argument can be None?
Returns:
The validated argument value.
Raises:
ValueError: If `arg_value` is not valid.
"""
choices = tuple(choices)
if not (nullable and arg_value is None) and (arg_value not in choices):
raise ValueError('Invalid value for argument `{}`: expected to be one '
'of {!r}, but got {!r}.'.
format(arg_name, choices, arg_value))
return arg_value
@contextmanager
def maybe_close(obj):
"""
Enter a context, and if `obj` has ``.close()`` method, close it
when exiting the context.
>>> class HasClose(object):
... def close(self):
... print('closed')
>>> class HasNotClose(object):
... pass
>>> with maybe_close(HasClose()) as obj: # doctest: +ELLIPSIS
... print(obj)
<mltk.utils.misc.HasClose ...>
closed
>>> with maybe_close(HasNotClose()) as obj: # doctest: +ELLIPSIS
... print(obj)
<mltk.utils.misc.HasNotClose ...>
Args:
obj: The object maybe to close.
Yields:
The specified `obj`.
"""
try:
yield obj
finally:
if hasattr(obj, 'close'):
obj.close()
def iter_files(root_dir: str, sep: str = '/') -> Generator[str, None, None]:
"""
Iterate through all files in `root_dir`, returning the relative paths
of each file. The sub-directories will not be yielded.
Args:
root_dir: The root directory, from which to iterate.
sep: The separator for the relative paths.
Yields:
The relative paths of each file.
"""
def f(parent_path, parent_name):
for f_name in os.listdir(parent_path):
f_child_path = parent_path + os.sep + f_name
f_child_name = parent_name + sep + f_name
if os.path.isdir(f_child_path):
for s in f(f_child_path, f_child_name):
yield s
else:
yield f_child_name
for name in os.listdir(root_dir):
child_path = root_dir + os.sep + name
if os.path.isdir(child_path):
for x in f(child_path, name):
yield x
else:
yield name
TValue = TypeVar('TValue')
class _InheritanceNode(object):
def __init__(self, type_: type):
self.type = type_
self.children = []
def add_child(self, child: '_InheritanceNode'):
self.children.append(child)
class InheritanceDict(Generic[TValue]):
"""
A dict that gives the registered value of the closest known ancestor
of a query type (`ancestor` includes the type itself).
>>> class GrandPa(object): pass
>>> class Parent(GrandPa): pass
>>> class Child(Parent): pass
>>> class Uncle(GrandPa): pass
>>> d = InheritanceDict()
>>> d[Child] = 1
>>> d[GrandPa] = 2
>>> d[Uncle] = 3
>>> d[GrandPa]
2
>>> d[Parent]
2
>>> d[Child]
1
>>> d[Uncle]
3
>>> d[str]
Traceback (most recent call last):
...
KeyError: <class 'str'>
"""
def __init__(self):
self._nodes = [] # type: List[_InheritanceNode]
self._values = {}
self._topo_sorted = None
def __setitem__(self, type_: type, value: TValue):
this_node = _InheritanceNode(type_)
if type_ not in self._values: | self._nodes.append(this_node)
self._topo_sorted = None
self._values[type_] = value
def __getitem__(self, type_: type) -> TValue:
if self._topo_sorted is None:
self._topo_sort()
for t in reversed(self._topo_sorted):
if t is type_ or issubclass(type_, t):
return self._values[t]
raise KeyError(type_)
def _topo_sort(self):
parent_count = {node: 0 for node in self._nodes}
for node in self._nodes:
for child in node.children:
parent_count[child] += 1
heap = heapdict()
for node, pa_count in parent_count.items():
heap[node] = pa_count
topo_sorted = []
while heap:
node, priority = heap.popitem()
topo_sorted.append(node.type)
for child in node.children:
heap[child] -= 1
self._topo_sorted = topo_sorted
class CachedInheritanceDict(InheritanceDict[TValue]):
"""
A subclass of :class:`InheritanceDict`, with an additional lookup cache.
The cache is infinitely large, thus this class is only suitable under the
situation where the number of queried types are not too large.
"""
NOT_EXIST = ...
def __init__(self):
super().__init__()
self._cache = {} # type: Dict[type, TValue]
def _topo_sort(self):
self._cache.clear()
super()._topo_sort()
def __getitem__(self, type_: type) -> TValue:
ret = self._cache.get(type_, None)
if ret is None:
try:
ret = self._cache[type_] = super().__getitem__(type_)
except KeyError:
self._cache[type_] = self.NOT_EXIST
raise
elif ret is self.NOT_EXIST:
raise KeyError(type_)
return ret
def __setitem__(self, type_: type, value: TValue):
self._cache.clear()
super().__setitem__(type_, value)
def parse_tags(s: str) -> List[str]:
"""
Parse comma separated tags str into list of tags.
>>> parse_tags('one tag')
['one tag']
>>> parse_tags(' strip left and right ends ')
['strip left and right ends']
>>> parse_tags('two, tags')
['two', 'tags']
>>> parse_tags('"quoted, string" is one tag')
['quoted, string is one tag']
>>> parse_tags(', empty tags, , will be skipped, ')
['empty tags', 'will be skipped']
Args:
s: The comma separated tags str.
Returns:
The parsed tags.
"""
tags = []
buf = []
in_quoted = None
for c in s:
if in_quoted:
if c == in_quoted:
in_quoted = None
else:
buf.append(c)
elif c == '"' or c == '\'':
in_quoted = c
elif c == ',':
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
buf.clear()
else:
buf.append(c)
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
return tags
TValue = TypeVar('TValue')
def deep_copy(value: TValue) -> TValue:
"""
A patched deep copy function, that can handle various types cannot be
handled by the standard :func:`copy.deepcopy`.
Args:
value: The value to be copied.
Returns:
The copied value.
"""
def pattern_dispatcher(v, memo=None):
return v # we don't need to copy a regex pattern object, it's read-only
old_dispatcher = copy._deepcopy_dispatch.get(PatternType, None)
copy._deepcopy_dispatch[PatternType] = pattern_dispatcher
try:
return copy.deepcopy(value)
finally:
if old_dispatcher is not None: # pragma: no cover
copy._deepcopy_dispatch[PatternType] = old_dispatcher
else:
del copy._deepcopy_dispatch[PatternType] | for node in self._nodes:
if issubclass(type_, node.type):
node.add_child(this_node)
elif issubclass(node.type, type_):
this_node.add_child(node) | random_line_split |
misc.py | import copy
import os
import re
import time
from contextlib import contextmanager
from typing import *
import numpy as np
from heapdict import heapdict
__all__ = [
'PatternType',
'Singleton', 'NOT_SET',
'format_duration', 'ETA', 'minibatch_slices_iterator',
'optional_apply', 'validate_enum_arg',
'maybe_close', 'iter_files',
'InheritanceDict', 'CachedInheritanceDict',
'parse_tags', 'deep_copy',
]
PatternType = type(re.compile('x'))
class Singleton(object):
"""
Base class for singleton classes.
>>> class Parent(Singleton):
... pass
>>> class Child(Parent):
... pass
>>> Parent() is Parent()
True
>>> Child() is Child()
True
>>> Parent() is not Child()
True
"""
__instances_dict = {}
def __new__(cls, *args, **kwargs):
if cls not in Singleton.__instances_dict:
Singleton.__instances_dict[cls] = \
object.__new__(cls, *args, **kwargs)
return Singleton.__instances_dict[cls]
class NotSet(Singleton):
"""
Class of the `NOT_SET` constant.
>>> NOT_SET is not None
True
>>> NOT_SET
NOT_SET
>>> NOT_SET == NOT_SET
True
>>> NotSet() is NOT_SET
True
>>> NotSet() == NOT_SET
True
"""
def | (self):
return 'NOT_SET'
NOT_SET = NotSet()
def format_duration(seconds: Union[float, int],
short_units: bool = True,
keep_zeros: bool = False):
"""
Format specified time duration as human readable text.
>>> format_duration(0)
'0s'
>>> format_duration(61)
'1m 1s'
>>> format_duration(86400 * 2 + 60)
'2d 1m'
>>> format_duration(86400 * 2 + 60, keep_zeros=True)
'2d 0h 1m 0s'
>>> format_duration(86400 * 2 + 60, short_units=False)
'2 days 1 minute'
>>> format_duration(-1)
'1s ago'
Args:
seconds: Number of seconds of the time duration.
short_units: Whether or not to use short units ("d", "h", "m", "s")
instead of long units ("day", "hour", "minute", "second")?
keep_zeros: Whether or not to keep zero components?
(e.g., to keep "0h 0m" in "1d 0h 0m 3s").
Returns:
str: The formatted time duration.
"""
if short_units:
units = [(86400, 'd', 'd'), (3600, 'h', 'h'),
(60, 'm', 'm'), (1, 's', 's')]
else:
units = [(86400, ' day', ' days'), (3600, ' hour', ' hours'),
(60, ' minute', ' minutes'), (1, ' second', ' seconds')]
if seconds < 0:
seconds = -seconds
suffix = ' ago'
else:
suffix = ''
pieces = []
for uvalue, uname, uname_plural in units[:-1]:
if seconds >= uvalue:
val = int(seconds // uvalue)
pieces.append(f'{val:d}{uname_plural if val > 1 else uname}')
seconds %= uvalue
elif keep_zeros and pieces:
pieces.append(f'0{uname}')
uname, uname_plural = units[-1][1:]
if seconds > np.finfo(np.float64).eps:
pieces.append(f'{seconds:.4g}{uname_plural if seconds > 1 else uname}')
elif not pieces or keep_zeros:
pieces.append(f'0{uname}')
return ' '.join(pieces) + suffix
class ETA(object):
"""
Class to help compute the Estimated Time Ahead (ETA).
>>> now = time.time()
>>> eta = ETA()
>>> eta.take_snapshot(progress=0.0, now=now) # record the start time
>>> eta.get_eta(progress=0.01, now=now + 5.) # i.e., 1% work costs 5s
495.0
"""
def __init__(self):
"""Construct a new :class:`ETA`."""
self._times = []
self._progresses = []
def take_snapshot(self, progress: Union[int, float],
now: Optional[Union[int, float]] = None):
"""
Take a snapshot of ``(progress, now)``, for later computing ETA.
Args:
progress: The current progress, range in ``[0, 1]``.
now: The current timestamp in seconds. If not specified, use
``time.time()``.
"""
if not self._progresses or progress - self._progresses[-1] > .001:
# we only record the time and corresponding progress if the
# progress has been advanced by 0.1%
if now is None:
now = time.time()
self._progresses.append(progress)
self._times.append(now)
def get_eta(self,
progress: Union[int, float],
now: Optional[Union[int, float]] = None,
take_snapshot: bool = True) -> Optional[float]:
"""
Get the Estimated Time Ahead (ETA).
Args:
progress: The current progress, range in ``[0, 1]``.
now: The current timestamp in seconds. If not specified, use
``time.time()``.
take_snapshot: Whether or not to take a snapshot of
the specified ``(progress, now)``? (default :obj:`True`)
Returns:
The ETA in seconds, or :obj:`None` if the ETA cannot be estimated.
"""
# TODO: Maybe we can have a better estimation algorithm here!
if now is None:
now = time.time()
if self._progresses:
time_delta = now - self._times[0]
progress_delta = progress - self._progresses[0]
progress_left = 1. - progress
if progress_delta < 1e-7:
return None
eta = time_delta / progress_delta * progress_left
else:
eta = None
if take_snapshot:
self.take_snapshot(progress, now)
return eta
def minibatch_slices_iterator(length: int,
batch_size: int,
skip_incomplete: bool = False
) -> Generator[slice, None, None]:
"""
Iterate through all the mini-batch slices.
>>> arr = np.arange(10)
>>> for batch_s in minibatch_slices_iterator(len(arr), batch_size=4):
... print(arr[batch_s])
[0 1 2 3]
[4 5 6 7]
[8 9]
>>> for batch_s in minibatch_slices_iterator(
... len(arr), batch_size=4, skip_incomplete=True):
... print(arr[batch_s])
[0 1 2 3]
[4 5 6 7]
Args:
length: Total length of data in an epoch.
batch_size: Size of each mini-batch.
skip_incomplete: If :obj:`True`, discard the final batch if it
contains less than `batch_size` number of items.
Yields
Slices of each mini-batch. The last mini-batch may contain less
elements than `batch_size`.
"""
start = 0
stop1 = (length // batch_size) * batch_size
while start < stop1:
yield slice(start, start + batch_size, 1)
start += batch_size
if not skip_incomplete and start < length:
yield slice(start, length, 1)
def optional_apply(f, value):
"""
If `value` is not None, return `f(value)`, otherwise return None.
>>> optional_apply(int, None) is None
True
>>> optional_apply(int, '123')
123
Args:
f: The function to apply on `value`.
value: The value, maybe None.
"""
if value is not None:
return f(value)
TArgValue = TypeVar('TArgValue')
def validate_enum_arg(arg_name: str,
arg_value: Optional[TArgValue],
choices: Iterable[TArgValue],
nullable: bool = False) -> Optional[TArgValue]:
"""
Validate the value of an enumeration argument.
Args:
arg_name: Name of the argument.
arg_value: Value of the argument.
choices: Valid choices of the argument value.
nullable: Whether or not the argument can be None?
Returns:
The validated argument value.
Raises:
ValueError: If `arg_value` is not valid.
"""
choices = tuple(choices)
if not (nullable and arg_value is None) and (arg_value not in choices):
raise ValueError('Invalid value for argument `{}`: expected to be one '
'of {!r}, but got {!r}.'.
format(arg_name, choices, arg_value))
return arg_value
@contextmanager
def maybe_close(obj):
"""
Enter a context, and if `obj` has ``.close()`` method, close it
when exiting the context.
>>> class HasClose(object):
... def close(self):
... print('closed')
>>> class HasNotClose(object):
... pass
>>> with maybe_close(HasClose()) as obj: # doctest: +ELLIPSIS
... print(obj)
<mltk.utils.misc.HasClose ...>
closed
>>> with maybe_close(HasNotClose()) as obj: # doctest: +ELLIPSIS
... print(obj)
<mltk.utils.misc.HasNotClose ...>
Args:
obj: The object maybe to close.
Yields:
The specified `obj`.
"""
try:
yield obj
finally:
if hasattr(obj, 'close'):
obj.close()
def iter_files(root_dir: str, sep: str = '/') -> Generator[str, None, None]:
"""
Iterate through all files in `root_dir`, returning the relative paths
of each file. The sub-directories will not be yielded.
Args:
root_dir: The root directory, from which to iterate.
sep: The separator for the relative paths.
Yields:
The relative paths of each file.
"""
def f(parent_path, parent_name):
for f_name in os.listdir(parent_path):
f_child_path = parent_path + os.sep + f_name
f_child_name = parent_name + sep + f_name
if os.path.isdir(f_child_path):
for s in f(f_child_path, f_child_name):
yield s
else:
yield f_child_name
for name in os.listdir(root_dir):
child_path = root_dir + os.sep + name
if os.path.isdir(child_path):
for x in f(child_path, name):
yield x
else:
yield name
TValue = TypeVar('TValue')
class _InheritanceNode(object):
def __init__(self, type_: type):
self.type = type_
self.children = []
def add_child(self, child: '_InheritanceNode'):
self.children.append(child)
class InheritanceDict(Generic[TValue]):
"""
A dict that gives the registered value of the closest known ancestor
of a query type (`ancestor` includes the type itself).
>>> class GrandPa(object): pass
>>> class Parent(GrandPa): pass
>>> class Child(Parent): pass
>>> class Uncle(GrandPa): pass
>>> d = InheritanceDict()
>>> d[Child] = 1
>>> d[GrandPa] = 2
>>> d[Uncle] = 3
>>> d[GrandPa]
2
>>> d[Parent]
2
>>> d[Child]
1
>>> d[Uncle]
3
>>> d[str]
Traceback (most recent call last):
...
KeyError: <class 'str'>
"""
def __init__(self):
self._nodes = [] # type: List[_InheritanceNode]
self._values = {}
self._topo_sorted = None
def __setitem__(self, type_: type, value: TValue):
this_node = _InheritanceNode(type_)
if type_ not in self._values:
for node in self._nodes:
if issubclass(type_, node.type):
node.add_child(this_node)
elif issubclass(node.type, type_):
this_node.add_child(node)
self._nodes.append(this_node)
self._topo_sorted = None
self._values[type_] = value
def __getitem__(self, type_: type) -> TValue:
if self._topo_sorted is None:
self._topo_sort()
for t in reversed(self._topo_sorted):
if t is type_ or issubclass(type_, t):
return self._values[t]
raise KeyError(type_)
def _topo_sort(self):
parent_count = {node: 0 for node in self._nodes}
for node in self._nodes:
for child in node.children:
parent_count[child] += 1
heap = heapdict()
for node, pa_count in parent_count.items():
heap[node] = pa_count
topo_sorted = []
while heap:
node, priority = heap.popitem()
topo_sorted.append(node.type)
for child in node.children:
heap[child] -= 1
self._topo_sorted = topo_sorted
class CachedInheritanceDict(InheritanceDict[TValue]):
"""
A subclass of :class:`InheritanceDict`, with an additional lookup cache.
The cache is infinitely large, thus this class is only suitable under the
situation where the number of queried types are not too large.
"""
NOT_EXIST = ...
def __init__(self):
super().__init__()
self._cache = {} # type: Dict[type, TValue]
def _topo_sort(self):
self._cache.clear()
super()._topo_sort()
def __getitem__(self, type_: type) -> TValue:
ret = self._cache.get(type_, None)
if ret is None:
try:
ret = self._cache[type_] = super().__getitem__(type_)
except KeyError:
self._cache[type_] = self.NOT_EXIST
raise
elif ret is self.NOT_EXIST:
raise KeyError(type_)
return ret
def __setitem__(self, type_: type, value: TValue):
self._cache.clear()
super().__setitem__(type_, value)
def parse_tags(s: str) -> List[str]:
"""
Parse comma separated tags str into list of tags.
>>> parse_tags('one tag')
['one tag']
>>> parse_tags(' strip left and right ends ')
['strip left and right ends']
>>> parse_tags('two, tags')
['two', 'tags']
>>> parse_tags('"quoted, string" is one tag')
['quoted, string is one tag']
>>> parse_tags(', empty tags, , will be skipped, ')
['empty tags', 'will be skipped']
Args:
s: The comma separated tags str.
Returns:
The parsed tags.
"""
tags = []
buf = []
in_quoted = None
for c in s:
if in_quoted:
if c == in_quoted:
in_quoted = None
else:
buf.append(c)
elif c == '"' or c == '\'':
in_quoted = c
elif c == ',':
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
buf.clear()
else:
buf.append(c)
if buf:
tag = ''.join(buf).strip()
if tag:
tags.append(tag)
return tags
TValue = TypeVar('TValue')
def deep_copy(value: TValue) -> TValue:
"""
A patched deep copy function, that can handle various types cannot be
handled by the standard :func:`copy.deepcopy`.
Args:
value: The value to be copied.
Returns:
The copied value.
"""
def pattern_dispatcher(v, memo=None):
return v # we don't need to copy a regex pattern object, it's read-only
old_dispatcher = copy._deepcopy_dispatch.get(PatternType, None)
copy._deepcopy_dispatch[PatternType] = pattern_dispatcher
try:
return copy.deepcopy(value)
finally:
if old_dispatcher is not None: # pragma: no cover
copy._deepcopy_dispatch[PatternType] = old_dispatcher
else:
del copy._deepcopy_dispatch[PatternType]
| __repr__ | identifier_name |
cube_reader.py | import os
import re
import file_writer
import numpy as np
from numpy import unravel_index
import datetime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy import signal
import peakutils
from lmfit import minimize, Parameters, Model
from astropy.io import fits
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
def read_file(file_name):
""" reads file_name and returns specific header data and image data """
fits_file = fits.open(file_name) | header = fits_file[0].header
image_data = fits_file[1].data
segmentation_data = fits_file[2].data
header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}
# clause to differentiate between CDELT3 and CD3_3
for hdr_key, hdr_value in header_keywords.items():
# finding required header values
hdr_value = header[hdr_key]
header_keywords[hdr_key] = hdr_value
return header_keywords, image_data, segmentation_data
def wavelength_solution(file_name):
""" wavelength solution in Angstroms """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
range_begin = header_data['CRVAL3']
pixel_begin = header_data['CRPIX3']
step_size = header_data['CD3_3']
steps = len(image_data)
range_end = range_begin + steps * step_size
return {'begin': range_begin, 'end': range_end, 'steps': steps}
def image_collapser(file_name):
""" collapses image data so it can be passed as a heatmap """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
image_median = np.zeros((ra_axis, dec_axis))
image_sum = np.zeros((ra_axis, dec_axis))
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
pd_median = np.nanmedian(pixel_data)
pd_sum = np.nansum(pixel_data)
image_median[i_ra][i_dec] = pd_median
image_sum[i_ra][i_dec] = pd_sum
return {'median': image_median, 'sum': image_sum}
def spectrum_creator(file_name):
""" creating a spectra from the area as defined in the segementation area """
file_data = read_file(file_name)
image_data = file_data[1]
segmentation_data = file_data[2]
collapsed_data = image_collapser(file_name)
# spectrum for central pixel
cp_bright = []
for key, data in collapsed_data.items():
lgst_val = data.argmax()
lgst_loc = unravel_index(data.argmax(), data.shape)
cp_bright.append(lgst_loc)
cp_loc = 0
if ( cp_bright[0] == cp_bright[1] ):
cp_loc = cp_bright[0]
else:
cp_loc = cp_bright[1]
cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]
# spectrum as defined by the segmentation area
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = [int(x) for x in re.findall('\d+', stk_f_n)][0]
# locating where the galaxy pixels are from the cube_id
seg_curr_cube = np.where(segmentation_data == cube_id)
scc_rows, scc_cols = seg_curr_cube
#np.set_printoptions(threshold=np.nan)
#print(segmentation_data)
collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])
for i_r in range(len(scc_rows)):
# I want to pull out each pixel and store it into the collapsed spectrum array
collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]
galaxy_spectrum = np.zeros(np.shape(image_data)[0])
for i_ax in range(len(galaxy_spectrum)):
galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])
return {'central': cp_spec_data, 'galaxy': galaxy_spectrum,
'segmentation': segmentation_data}
def cube_noise(cube_id):
cube_file_name = ("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_" + str(cube_id) + ".fits")
cube_file = read_file(cube_file_name)
image_data = cube_file[1]
collapsed_data = spectrum_creator(cube_file_name)
segmentation_data = cube_file[2]
pixels_data = np.where(segmentation_data == cube_id)
pixels_noise = np.where(segmentation_data == 0)
pn_rows, pn_cols = pixels_noise
pd_num = np.shape(pixels_data)[1] # number of pixels so that a ratio can be
pn_num = np.shape(pixels_noise)[1] # calculated
# calculating the noise based off the segmentation data
noise_spectra = np.zeros([np.shape(image_data)[0], len(pn_rows)])
for i_noise in range(len(pn_rows)):
noise_spectra[:,i_noise] = image_data[:,pn_rows[i_noise],pn_cols[i_noise]]
nr_noise = np.zeros(np.shape(noise_spectra)[0])
for i_ax in range(np.shape(noise_spectra)[0]):
nr_noise[i_ax] = np.nansum(noise_spectra[i_ax])
noise = np.median(nr_noise) * np.sqrt(pd_num**2/pn_num**2)
return {'noise_data': nr_noise, 'noise_value': noise, 'pd_num': pd_num,
'pn_num': pn_num}
def spectra_stacker(file_name):
""" stacking all spectra together for a stacked spectra image """
file_data = read_file(file_name)
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
pxl_total = ra_axis * dec_axis
data_unwrap = []
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
data_unwrap.append(pixel_data)
data_stacked = np.zeros((pxl_total, wl_axis))
for i_row in range(np.shape(data_unwrap)[0]):
data_row = data_unwrap[i_row]
for i_pixel in range(len(data_row)):
data_stacked[i_row][i_pixel] = data_row[i_pixel]
# writing data to a fits file
hdr = fits.Header()
hdr['CTYPE1'] = 'pixel'
hdr['CRPIX1'] = 1
hdr['CRVAL1'] = data_stacked[0][0]
hdr['CDELT1'] = data_stacked[0][1] - data_stacked[0][0]
primary_hdu = fits.PrimaryHDU(header=hdr)
hdu = fits.ImageHDU(data_stacked)
hdul = fits.HDUList([primary_hdu, hdu])
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
hdul.writeto(data_dir + '/stacked.fits')
return data_unwrap
def sky_noise(sky_file_name):
""" returning sky noise data files """
fits_file = fits.open(sky_file_name)
image_data = fits_file[0].data
return image_data
def spectra_analysis(file_name, sky_file_name):
""" correcting data to be in rest frame """
# read file name and select out the id that we are dealing with
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = int(re.search(r'\d+', stk_f_n).group())
# read catalogue and obtain the HST redshift estimate
#catalogue = np.load("data/matched_catalogue.npy")
catalogue = np.load("data/low_redshift_catalogue.npy")
cat_loc = np.where(catalogue[:,0] == cube_id)[0]
cube_info = catalogue[cat_loc][0]
hst_redshift = cube_info[7]
# spectra and sky noise data
spectra_data = spectrum_creator(file_name)
wl_soln = wavelength_solution(file_name)
sn_data = sky_noise(sky_file_name)
galaxy_data = spectra_data['galaxy']
# removing baseline from data
base = peakutils.baseline(galaxy_data, 3)
gd_mc = galaxy_data - base
# scaling sky-noise to be similar to spectra data
gd_max = np.amax(galaxy_data)
sn_data_max = np.amax(sn_data)
sn_scale = gd_max / sn_data_max
sn_data = sn_data * sn_scale
# spectra lines
sl = {
'emis': {
'[OII]': '3727',
'CaK': '3933',
'CaH': '3968',
'Hdelta': '4101',
},
'abs': {'K': '3934.777',
}
}
# we can use the redshift from the HST catalogue to define the region to search for
# the doublet in
# lower and upper bound on wavelength range
lower_lambda = (1+hst_redshift)*3600
upper_lambda = (1+hst_redshift)*3850
# x-axis data
data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda)
lambda_data = data_h_range[mask]
flux_data = gd_mc[mask]
# Finding peaks with PeakUtils
pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)
pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)
pu_peaks_x = np.sort(pu_peaks_x)
pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]
pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')
peaks_file.write("Peaks found on " + str(datetime.datetime.now()) + "\n\n")
peaks_file.write("Number Wavelength \n")
for i_peak in range(len(pu_peaks_x)):
curr_peak = pu_peaks_x[i_peak]
peaks_file.write(str(i_peak) + " " + str(curr_peak) + "\n")
# manually selecting which peak is the [OII] peak - given in wavelength
if (pu_peaks_x.size != 0):
otwo_wav = float(pu_peaks_x[0])
otwo_acc = float(sl['emis']['[OII]'])
redshift = (otwo_wav / otwo_acc) - 1
else:
# accepting HST redshift if cannot find peak
redshift = hst_redshift
return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift':
redshift, 'pu_peaks': pu_peaks_x}
def find_nearest(array, value):
""" Find nearest value is an array """
idx = (np.abs(array-value)).argmin()
return idx
def sn_line(x, c):
return c
def sn_gauss(x, c, i1, mu, sigma1):
norm = (sigma1*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-mu)**2/(2*sigma1**2))
return (c + term1)
def chisq(y_model, y_data, y_err):
csq = (y_data-y_model)**2 / y_err**2
csq = np.sum(csq)
red_csq = csq / (len(y_data) - 4)
return {'chisq': csq, 'chisq_red': red_csq}
def sky_noise_weighting(file_name, sky_file_name):
""" finding the sky noise from a small section of the cube data """
cs_data = spectra_analysis(file_name, sky_file_name)
cube_data = cs_data['gd_shifted']
sn_data = cs_data['sky_noise']
wl_soln = wavelength_solution(file_name)
sn_data_min = np.min(sn_data)
in_wt = 1 / (sn_data - sn_data_min + 1)
sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise
for i in range(len(in_wt)):
data_acl = cube_data[i]
data_sky = sn_data[i]
data_prb = in_wt[i]
if ( 0.00 <= np.abs(data_prb) <= 1.00 ):
sky_regns[i][0] = data_prb
sky_regns[i][1] = data_sky
# finding max peak in the sky-noise data and fitting a Gaussian to that
# x-axis data
x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
# Finding peaks with PeakUtils
sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)
sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)
if (sky_peaks_x.size != 0):
sky_peak = sky_peaks_x[0]
sky_peak_index = find_nearest(sky_peak, x_range)
else:
sky_peak = 6000
sky_peak_index = 0
sky_peak_loc = x_range[sky_peak_index]
sky_peak_range = [sky_peak-100, sky_peak+100]
sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]
sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_gauss_params = Parameters()
sky_gauss_params.add('c', value=0)
sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)
sky_gauss_params.add('mu', value=sky_peak_loc)
sky_gauss_params.add('sigma1', value=3)
sky_gauss_model = Model(sn_gauss)
sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x,
params=sky_gauss_params)
sky_gauss_best = sky_gauss_rslt.best_values
sky_sigma = sky_gauss_best['sigma1']
return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}
def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst):
""" function for Gaussian doublet """
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
l1 = dblt_mu[0] * (1+z)
l2 = dblt_mu[1] * (1+z)
sigma = np.sqrt(sigma_gal**2 + sigma_inst**2)
norm = (sigma*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-l1)**2/(2*sigma**2))
term2 = ( i2 / norm ) * np.exp(-(x-l2)**2/(2*sigma**2))
return (c*x + term1 + term2)
def otwo_doublet_fitting(file_name, sky_file_name):
sa_data = spectra_analysis(file_name, sky_file_name)
y_shifted = sa_data['gd_shifted']
orr = wavelength_solution(file_name)
sn_data = sky_noise_weighting(file_name, sky_file_name)
redshift = sa_data['redshift']
# obtaining the OII range and region
# lower and upper bound on wavelength range
lower_lambda = (1+redshift)*3600
upper_lambda = (1+redshift)*3750
otr = [lower_lambda, upper_lambda]
print(otr)
orr_x = np.linspace(orr['begin'], orr['end'], orr['steps'])
dt_region = [find_nearest(orr_x, x) for x in otr]
otwo_region = y_shifted[dt_region[0]:dt_region[1]]
print(orr_x)
ot_x = orr_x[dt_region[0]:dt_region[1]]
otwo_max_loc = np.argmax(otwo_region)
otwo_max_val = np.max(otwo_region)
# standard deviation of a range before the peak
stdr_b = 50
stdr_e = otwo_max_loc - 50
stddev_lim = [stdr_b, stdr_e]
stddev_x = ot_x[stddev_lim[0]:stddev_lim[1]]
stddev_region = otwo_region[stddev_lim[0]:stddev_lim[1]]
stddev_val = np.std(stddev_region)
# fitting a gaussian doublet model to the data
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
dblt_val = ot_x[otwo_max_loc]
dblt_rng = [dblt_val-20, dblt_val+20]
dblt_rng = [find_nearest(orr_x, x) for x in dblt_rng]
dblt_rng_vals = orr_x[dblt_rng[0]:dblt_rng[1]]
dblt_rgn = y_shifted[dblt_rng[0]:dblt_rng[1]]
rdst = sa_data['redshift']
sky_weight = sn_data['inverse_sky']
sky_weight = sky_weight[dt_region[0]:dt_region[1]]
# the parameters we need are (c, i1, i2, sigma1, z)
p0 = [0, otwo_max_val, 1.3, 3, rdst]
c, i_val1, r, sigma_gal, z = p0
sigma_sky = sn_data['sky_sigma']
gss_pars = Parameters()
gss_pars.add('c', value=c)
gss_pars.add('i1', value=i_val1, min=0.0)
gss_pars.add('r', value=r, min=0.5, max=1.5)
gss_pars.add('i2', expr='i1/r', min=0.0)
gss_pars.add('sigma_gal', value=sigma_gal)
gss_pars.add('z', value=z)
gss_pars.add('sigma_inst', value=sigma_sky, vary=False)
gss_model = Model(f_doublet)
gss_result = gss_model.fit(otwo_region, x=ot_x, params=gss_pars,
weights=sky_weight)
opti_pms = gss_result.best_values
init_pms = gss_result.init_values
# working out signal to noise now
sn_line_parms = Parameters()
sn_line_parms.add('c', value=c)
sn_line_model = Model(sn_line)
sn_line_rslt = sn_line_model.fit(otwo_region, x=ot_x, params=sn_line_parms)
sn_line_bpms = sn_line_rslt.best_values
sn_line_data = sn_line_rslt.best_fit
sn_gauss_parms = Parameters()
sn_gauss_parms.add('c', value=c)
sn_gauss_parms.add('i1', value=i_val1, min=0.0)
sn_gauss_parms.add('mu', value=dblt_val)
sn_gauss_parms.add('sigma1', value=sigma_gal)
sn_gauss_model = Model(sn_gauss)
sn_gauss_rslt = sn_gauss_model.fit(otwo_region, x=ot_x, params=sn_gauss_parms)
sn_gauss_bpms = sn_gauss_rslt.best_values
sn_gauss_data = sn_gauss_rslt.best_fit
sn_line_csqs = chisq(sn_line_data, otwo_region, stddev_val)
sn_gauss_csqs = chisq(sn_gauss_data, otwo_region, stddev_val)
signal_noise = np.sqrt(sn_line_csqs['chisq'] - sn_gauss_csqs['chisq'])
# saving data to text files
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
file_writer.analysis_complete(data_dir, stk_f_n, gss_result, init_pms, opti_pms,
sn_line_csqs, sn_gauss_csqs, signal_noise, sn_line_bpms, sn_line_data,
sn_gauss_bpms, sn_gauss_data)
return {'range': otr, 'x_region': ot_x,'y_region': otwo_region, 'doublet_range':
dblt_rng_vals, 'std_x': stddev_x, 'std_y': stddev_region, 'lm_best_fit':
gss_result.best_fit, 'lm_best_param': gss_result.best_values,
'lm_init_fit': gss_result.init_fit, 'sn_line': sn_line_rslt.best_fit,
'sn_gauss': sn_gauss_rslt.best_fit}
def analysis(file_name, sky_file_name):
""" Graphs and results from analysing the cube for OII spectra """
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams['text.latex.preamble'] = [r'\boldmath']
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
spectra_stacker(file_name)
# one figure to rule them all
main_fig = plt.figure(1)
# calling data once will be enough
im_coll_data = image_collapser(file_name)
spectra_data = spectrum_creator(file_name)
sr = wavelength_solution(file_name)
gs_data = spectra_analysis(file_name, sky_file_name)
def graph_indiv():
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
cbs_y = gs_data['gd_shifted']
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
# --- for collapsed images ---
def graphs_collapsed():
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(im_coll_data['median'], cmap='gray_r')
ax1.set_title(r'\textbf{galaxy: median}', fontsize=13)
ax1.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax1.set_ylabel(r'\textbf{Pixels}', fontsize=13)
ax2.imshow(im_coll_data['sum'], cmap='gray_r')
ax2.set_title(r'\textbf{galaxy: sum}', fontsize=13)
ax2.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax2.set_ylabel(r'\textbf{Pixels}', fontsize=13)
f.subplots_adjust(wspace=0.4)
f.savefig(data_dir + "/" + stk_f_n + '_collapsed_images.pdf')
snw_data = sky_noise_weighting(file_name, sky_file_name)
df_data = otwo_doublet_fitting(file_name, sky_file_name)
# --- spectra ---
def graphs_spectra():
f, (ax1, ax2) = plt.subplots(2, 1)
# --- redshifted data plotting
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
## plotting our cube data
cbs_y = gs_data['gd_shifted']
ax1.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
snd_y = snw_data['sky_regions'][:,1]
ax1.plot(cbd_x, snd_y, linewidth=0.5, color="#f44336", alpha=0.5)
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
## plotting our [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
ax1.plot(ot_x, ot_y, linewidth=0.5, color="#00c853")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
ax1.plot(std_x, std_y, linewidth=0.5, color="#00acc1")
## plotting peak lines for scipy finder and peakutils finder
#pk_lines = gs_data['gd_peaks']
#for i in range(len(pk_lines)):
#srb = sr['begin']
#ax1.axvline(x=srb+pk_lines[i], linewidth=0.5, color="#8bc34a", alpha=0.2)
pu_lines = gs_data['pu_peaks']
for i in range(len(pu_lines)):
srb = sr['begin']
ax1.axvline(x=(pu_lines[i]), linewidth=0.5, color="#ec407a", alpha=0.2)
ax1.set_title(r'\textbf{spectra: cross-section redshifted}', fontsize=13)
ax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax1.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax1.set_ylim([-1000,5000]) # setting manual limits for now
# --- corrected redshift
crs_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
rdst = gs_data['redshift']
sp_lines = gs_data['spectra']
## corrected wavelengths
corr_x = crs_x / (1+rdst)
## plotting our cube data
cps_y = gs_data['gd_shifted']
ax2.plot(corr_x, cps_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
sn_y = gs_data['sky_noise']
ax2.plot(corr_x, sn_y, linewidth=0.5, color="#e53935")
## plotting spectra lines
for e_key, e_val in sp_lines['emis'].items():
spec_line = float(e_val)
ax2.axvline(x=spec_line, linewidth=0.5, color="#00c853")
ax2.text(spec_line-10, 4800, e_key, rotation=-90)
ax2.set_title(r'\textbf{spectra: cross-section corrected}', fontsize=13)
ax2.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax2.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax2.set_ylim([-500,5000]) # setting manual limits for now
f.subplots_adjust(hspace=0.5)
f.savefig(data_dir + "/" + stk_f_n + '_spectra.pdf')
# saving our plotting into npy files so they can be used elsewhere
np.save(data_dir + "/" + stk_f_n + "_cbd_x", cbd_x)
np.save(data_dir + "/" + stk_f_n + "_cbs_y", cbs_y)
np.save(data_dir + "/" + stk_f_n + "_snd_y", snd_y)
np.save(data_dir + "/" + stk_f_n + "_corr_x", corr_x)
np.save(data_dir + "/" + stk_f_n + "_cps_y", cps_y)
def graphs_otwo_region():
ot_fig = plt.figure(6)
# plotting the data for the cutout [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
plt.plot(ot_x, ot_y, linewidth=1.5, color="#000000")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
#plt.plot(std_x, std_y, linewidth=1.5, color="#00acc1")
dblt_rng = df_data['doublet_range']
ot_x_b, ot_x_e = dblt_rng[0], dblt_rng[-1]
x_ax_vals = np.linspace(ot_x_b, ot_x_e, 1000)
# lmfit
lm_init = df_data['lm_init_fit']
lm_best = df_data['lm_best_fit']
plt.plot(ot_x, lm_best, linewidth=1.5, color="#1e88e5",
label=r"\textbf{Best fit}")
plt.plot(ot_x, lm_init, linewidth=1.5, color="#43a047", alpha=0.5,
label=r"\textbf{Initial guess}")
lm_params = df_data['lm_best_param']
lm_params = [prm_value for prm_key, prm_value in lm_params.items()]
c, i_val1, i_val2, sig_g, rdsh, sig_i = lm_params
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths for OII
l1 = dblt_mu[0] * (1+rdsh)
l2 = dblt_mu[1] * (1+rdsh)
sig = np.sqrt(sig_g**2 + sig_i**2)
norm = (sig*np.sqrt(2*np.pi))
lm_y1 = c + ( i_val1 / norm ) * np.exp(-(ot_x-l1)**2/(2*sig**2))
lm_y2 = c + ( i_val2 / norm ) * np.exp(-(ot_x-l2)**2/(2*sig**2))
plt.plot(ot_x, lm_y1, linewidth=1.5, color="#e64a19", alpha=0.7,
label=r"\textbf{Gaussian 1}")
plt.plot(ot_x, lm_y2, linewidth=1.5, color="#1a237e", alpha=0.7,
label=r"\textbf{Gaussian 2}")
# plotting signal-to-noise straight line and gaussian to verify it works
sn_line = df_data['sn_line']
sn_gauss = df_data['sn_gauss']
#plt.axhline(y=sn_line, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.plot(ot_x, sn_gauss, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.title(r'\textbf{[OII] region}', fontsize=13)
plt.legend(loc='upper left', prop={'size': 15})
plt.tick_params(labelsize=20)
plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
plt.ylabel(r'\textbf{Flux}', fontsize=20)
plt.xlim([l1-100,np.max(ot_x)])
plt.ylim([-100,np.max(ot_y)+100]) # setting manual limits for now
plt.savefig(data_dir + "/" + stk_f_n + '_otwo_region.pdf',bbox_inches="tight")
graph_indiv()
graphs_collapsed()
graphs_spectra()
graphs_otwo_region()
plt.close("all")
return {'image_data': im_coll_data, 'spectra_data': spectra_data, 'sr': sr,
'df_data': df_data, 'gs_data': gs_data, 'snw_data': snw_data}
if __name__ == '__main__':
analysis("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_1068.fits", "data/skyvariance_csub.fits") | random_line_split | |
cube_reader.py | import os
import re
import file_writer
import numpy as np
from numpy import unravel_index
import datetime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy import signal
import peakutils
from lmfit import minimize, Parameters, Model
from astropy.io import fits
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
def read_file(file_name):
""" reads file_name and returns specific header data and image data """
fits_file = fits.open(file_name)
header = fits_file[0].header
image_data = fits_file[1].data
segmentation_data = fits_file[2].data
header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}
# clause to differentiate between CDELT3 and CD3_3
for hdr_key, hdr_value in header_keywords.items():
# finding required header values
hdr_value = header[hdr_key]
header_keywords[hdr_key] = hdr_value
return header_keywords, image_data, segmentation_data
def wavelength_solution(file_name):
""" wavelength solution in Angstroms """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
range_begin = header_data['CRVAL3']
pixel_begin = header_data['CRPIX3']
step_size = header_data['CD3_3']
steps = len(image_data)
range_end = range_begin + steps * step_size
return {'begin': range_begin, 'end': range_end, 'steps': steps}
def image_collapser(file_name):
""" collapses image data so it can be passed as a heatmap """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
image_median = np.zeros((ra_axis, dec_axis))
image_sum = np.zeros((ra_axis, dec_axis))
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
pd_median = np.nanmedian(pixel_data)
pd_sum = np.nansum(pixel_data)
image_median[i_ra][i_dec] = pd_median
image_sum[i_ra][i_dec] = pd_sum
return {'median': image_median, 'sum': image_sum}
def spectrum_creator(file_name):
""" creating a spectra from the area as defined in the segementation area """
file_data = read_file(file_name)
image_data = file_data[1]
segmentation_data = file_data[2]
collapsed_data = image_collapser(file_name)
# spectrum for central pixel
cp_bright = []
for key, data in collapsed_data.items():
lgst_val = data.argmax()
lgst_loc = unravel_index(data.argmax(), data.shape)
cp_bright.append(lgst_loc)
cp_loc = 0
if ( cp_bright[0] == cp_bright[1] ):
cp_loc = cp_bright[0]
else:
cp_loc = cp_bright[1]
cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]
# spectrum as defined by the segmentation area
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = [int(x) for x in re.findall('\d+', stk_f_n)][0]
# locating where the galaxy pixels are from the cube_id
seg_curr_cube = np.where(segmentation_data == cube_id)
scc_rows, scc_cols = seg_curr_cube
#np.set_printoptions(threshold=np.nan)
#print(segmentation_data)
collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])
for i_r in range(len(scc_rows)):
# I want to pull out each pixel and store it into the collapsed spectrum array
collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]
galaxy_spectrum = np.zeros(np.shape(image_data)[0])
for i_ax in range(len(galaxy_spectrum)):
galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])
return {'central': cp_spec_data, 'galaxy': galaxy_spectrum,
'segmentation': segmentation_data}
def cube_noise(cube_id):
cube_file_name = ("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_" + str(cube_id) + ".fits")
cube_file = read_file(cube_file_name)
image_data = cube_file[1]
collapsed_data = spectrum_creator(cube_file_name)
segmentation_data = cube_file[2]
pixels_data = np.where(segmentation_data == cube_id)
pixels_noise = np.where(segmentation_data == 0)
pn_rows, pn_cols = pixels_noise
pd_num = np.shape(pixels_data)[1] # number of pixels so that a ratio can be
pn_num = np.shape(pixels_noise)[1] # calculated
# calculating the noise based off the segmentation data
noise_spectra = np.zeros([np.shape(image_data)[0], len(pn_rows)])
for i_noise in range(len(pn_rows)):
noise_spectra[:,i_noise] = image_data[:,pn_rows[i_noise],pn_cols[i_noise]]
nr_noise = np.zeros(np.shape(noise_spectra)[0])
for i_ax in range(np.shape(noise_spectra)[0]):
nr_noise[i_ax] = np.nansum(noise_spectra[i_ax])
noise = np.median(nr_noise) * np.sqrt(pd_num**2/pn_num**2)
return {'noise_data': nr_noise, 'noise_value': noise, 'pd_num': pd_num,
'pn_num': pn_num}
def spectra_stacker(file_name):
""" stacking all spectra together for a stacked spectra image """
file_data = read_file(file_name)
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
pxl_total = ra_axis * dec_axis
data_unwrap = []
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
data_unwrap.append(pixel_data)
data_stacked = np.zeros((pxl_total, wl_axis))
for i_row in range(np.shape(data_unwrap)[0]):
data_row = data_unwrap[i_row]
for i_pixel in range(len(data_row)):
data_stacked[i_row][i_pixel] = data_row[i_pixel]
# writing data to a fits file
hdr = fits.Header()
hdr['CTYPE1'] = 'pixel'
hdr['CRPIX1'] = 1
hdr['CRVAL1'] = data_stacked[0][0]
hdr['CDELT1'] = data_stacked[0][1] - data_stacked[0][0]
primary_hdu = fits.PrimaryHDU(header=hdr)
hdu = fits.ImageHDU(data_stacked)
hdul = fits.HDUList([primary_hdu, hdu])
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
hdul.writeto(data_dir + '/stacked.fits')
return data_unwrap
def sky_noise(sky_file_name):
""" returning sky noise data files """
fits_file = fits.open(sky_file_name)
image_data = fits_file[0].data
return image_data
def spectra_analysis(file_name, sky_file_name):
""" correcting data to be in rest frame """
# read file name and select out the id that we are dealing with
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = int(re.search(r'\d+', stk_f_n).group())
# read catalogue and obtain the HST redshift estimate
#catalogue = np.load("data/matched_catalogue.npy")
catalogue = np.load("data/low_redshift_catalogue.npy")
cat_loc = np.where(catalogue[:,0] == cube_id)[0]
cube_info = catalogue[cat_loc][0]
hst_redshift = cube_info[7]
# spectra and sky noise data
spectra_data = spectrum_creator(file_name)
wl_soln = wavelength_solution(file_name)
sn_data = sky_noise(sky_file_name)
galaxy_data = spectra_data['galaxy']
# removing baseline from data
base = peakutils.baseline(galaxy_data, 3)
gd_mc = galaxy_data - base
# scaling sky-noise to be similar to spectra data
gd_max = np.amax(galaxy_data)
sn_data_max = np.amax(sn_data)
sn_scale = gd_max / sn_data_max
sn_data = sn_data * sn_scale
# spectra lines
sl = {
'emis': {
'[OII]': '3727',
'CaK': '3933',
'CaH': '3968',
'Hdelta': '4101',
},
'abs': {'K': '3934.777',
}
}
# we can use the redshift from the HST catalogue to define the region to search for
# the doublet in
# lower and upper bound on wavelength range
lower_lambda = (1+hst_redshift)*3600
upper_lambda = (1+hst_redshift)*3850
# x-axis data
data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda)
lambda_data = data_h_range[mask]
flux_data = gd_mc[mask]
# Finding peaks with PeakUtils
pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)
pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)
pu_peaks_x = np.sort(pu_peaks_x)
pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]
pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')
peaks_file.write("Peaks found on " + str(datetime.datetime.now()) + "\n\n")
peaks_file.write("Number Wavelength \n")
for i_peak in range(len(pu_peaks_x)):
curr_peak = pu_peaks_x[i_peak]
peaks_file.write(str(i_peak) + " " + str(curr_peak) + "\n")
# manually selecting which peak is the [OII] peak - given in wavelength
if (pu_peaks_x.size != 0):
otwo_wav = float(pu_peaks_x[0])
otwo_acc = float(sl['emis']['[OII]'])
redshift = (otwo_wav / otwo_acc) - 1
else:
# accepting HST redshift if cannot find peak
redshift = hst_redshift
return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift':
redshift, 'pu_peaks': pu_peaks_x}
def find_nearest(array, value):
""" Find nearest value is an array """
idx = (np.abs(array-value)).argmin()
return idx
def sn_line(x, c):
return c
def sn_gauss(x, c, i1, mu, sigma1):
norm = (sigma1*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-mu)**2/(2*sigma1**2))
return (c + term1)
def chisq(y_model, y_data, y_err):
csq = (y_data-y_model)**2 / y_err**2
csq = np.sum(csq)
red_csq = csq / (len(y_data) - 4)
return {'chisq': csq, 'chisq_red': red_csq}
def sky_noise_weighting(file_name, sky_file_name):
""" finding the sky noise from a small section of the cube data """
cs_data = spectra_analysis(file_name, sky_file_name)
cube_data = cs_data['gd_shifted']
sn_data = cs_data['sky_noise']
wl_soln = wavelength_solution(file_name)
sn_data_min = np.min(sn_data)
in_wt = 1 / (sn_data - sn_data_min + 1)
sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise
for i in range(len(in_wt)):
data_acl = cube_data[i]
data_sky = sn_data[i]
data_prb = in_wt[i]
if ( 0.00 <= np.abs(data_prb) <= 1.00 ):
sky_regns[i][0] = data_prb
sky_regns[i][1] = data_sky
# finding max peak in the sky-noise data and fitting a Gaussian to that
# x-axis data
x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
# Finding peaks with PeakUtils
sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)
sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)
if (sky_peaks_x.size != 0):
sky_peak = sky_peaks_x[0]
sky_peak_index = find_nearest(sky_peak, x_range)
else:
sky_peak = 6000
sky_peak_index = 0
sky_peak_loc = x_range[sky_peak_index]
sky_peak_range = [sky_peak-100, sky_peak+100]
sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]
sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_gauss_params = Parameters()
sky_gauss_params.add('c', value=0)
sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)
sky_gauss_params.add('mu', value=sky_peak_loc)
sky_gauss_params.add('sigma1', value=3)
sky_gauss_model = Model(sn_gauss)
sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x,
params=sky_gauss_params)
sky_gauss_best = sky_gauss_rslt.best_values
sky_sigma = sky_gauss_best['sigma1']
return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}
def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst):
""" function for Gaussian doublet """
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
l1 = dblt_mu[0] * (1+z)
l2 = dblt_mu[1] * (1+z)
sigma = np.sqrt(sigma_gal**2 + sigma_inst**2)
norm = (sigma*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-l1)**2/(2*sigma**2))
term2 = ( i2 / norm ) * np.exp(-(x-l2)**2/(2*sigma**2))
return (c*x + term1 + term2)
def otwo_doublet_fitting(file_name, sky_file_name):
sa_data = spectra_analysis(file_name, sky_file_name)
y_shifted = sa_data['gd_shifted']
orr = wavelength_solution(file_name)
sn_data = sky_noise_weighting(file_name, sky_file_name)
redshift = sa_data['redshift']
# obtaining the OII range and region
# lower and upper bound on wavelength range
lower_lambda = (1+redshift)*3600
upper_lambda = (1+redshift)*3750
otr = [lower_lambda, upper_lambda]
print(otr)
orr_x = np.linspace(orr['begin'], orr['end'], orr['steps'])
dt_region = [find_nearest(orr_x, x) for x in otr]
otwo_region = y_shifted[dt_region[0]:dt_region[1]]
print(orr_x)
ot_x = orr_x[dt_region[0]:dt_region[1]]
otwo_max_loc = np.argmax(otwo_region)
otwo_max_val = np.max(otwo_region)
# standard deviation of a range before the peak
stdr_b = 50
stdr_e = otwo_max_loc - 50
stddev_lim = [stdr_b, stdr_e]
stddev_x = ot_x[stddev_lim[0]:stddev_lim[1]]
stddev_region = otwo_region[stddev_lim[0]:stddev_lim[1]]
stddev_val = np.std(stddev_region)
# fitting a gaussian doublet model to the data
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
dblt_val = ot_x[otwo_max_loc]
dblt_rng = [dblt_val-20, dblt_val+20]
dblt_rng = [find_nearest(orr_x, x) for x in dblt_rng]
dblt_rng_vals = orr_x[dblt_rng[0]:dblt_rng[1]]
dblt_rgn = y_shifted[dblt_rng[0]:dblt_rng[1]]
rdst = sa_data['redshift']
sky_weight = sn_data['inverse_sky']
sky_weight = sky_weight[dt_region[0]:dt_region[1]]
# the parameters we need are (c, i1, i2, sigma1, z)
p0 = [0, otwo_max_val, 1.3, 3, rdst]
c, i_val1, r, sigma_gal, z = p0
sigma_sky = sn_data['sky_sigma']
gss_pars = Parameters()
gss_pars.add('c', value=c)
gss_pars.add('i1', value=i_val1, min=0.0)
gss_pars.add('r', value=r, min=0.5, max=1.5)
gss_pars.add('i2', expr='i1/r', min=0.0)
gss_pars.add('sigma_gal', value=sigma_gal)
gss_pars.add('z', value=z)
gss_pars.add('sigma_inst', value=sigma_sky, vary=False)
gss_model = Model(f_doublet)
gss_result = gss_model.fit(otwo_region, x=ot_x, params=gss_pars,
weights=sky_weight)
opti_pms = gss_result.best_values
init_pms = gss_result.init_values
# working out signal to noise now
sn_line_parms = Parameters()
sn_line_parms.add('c', value=c)
sn_line_model = Model(sn_line)
sn_line_rslt = sn_line_model.fit(otwo_region, x=ot_x, params=sn_line_parms)
sn_line_bpms = sn_line_rslt.best_values
sn_line_data = sn_line_rslt.best_fit
sn_gauss_parms = Parameters()
sn_gauss_parms.add('c', value=c)
sn_gauss_parms.add('i1', value=i_val1, min=0.0)
sn_gauss_parms.add('mu', value=dblt_val)
sn_gauss_parms.add('sigma1', value=sigma_gal)
sn_gauss_model = Model(sn_gauss)
sn_gauss_rslt = sn_gauss_model.fit(otwo_region, x=ot_x, params=sn_gauss_parms)
sn_gauss_bpms = sn_gauss_rslt.best_values
sn_gauss_data = sn_gauss_rslt.best_fit
sn_line_csqs = chisq(sn_line_data, otwo_region, stddev_val)
sn_gauss_csqs = chisq(sn_gauss_data, otwo_region, stddev_val)
signal_noise = np.sqrt(sn_line_csqs['chisq'] - sn_gauss_csqs['chisq'])
# saving data to text files
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
file_writer.analysis_complete(data_dir, stk_f_n, gss_result, init_pms, opti_pms,
sn_line_csqs, sn_gauss_csqs, signal_noise, sn_line_bpms, sn_line_data,
sn_gauss_bpms, sn_gauss_data)
return {'range': otr, 'x_region': ot_x,'y_region': otwo_region, 'doublet_range':
dblt_rng_vals, 'std_x': stddev_x, 'std_y': stddev_region, 'lm_best_fit':
gss_result.best_fit, 'lm_best_param': gss_result.best_values,
'lm_init_fit': gss_result.init_fit, 'sn_line': sn_line_rslt.best_fit,
'sn_gauss': sn_gauss_rslt.best_fit}
def analysis(file_name, sky_file_name):
""" Graphs and results from analysing the cube for OII spectra """
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams['text.latex.preamble'] = [r'\boldmath']
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
o |
spectra_stacker(file_name)
# one figure to rule them all
main_fig = plt.figure(1)
# calling data once will be enough
im_coll_data = image_collapser(file_name)
spectra_data = spectrum_creator(file_name)
sr = wavelength_solution(file_name)
gs_data = spectra_analysis(file_name, sky_file_name)
def graph_indiv():
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
cbs_y = gs_data['gd_shifted']
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
# --- for collapsed images ---
def graphs_collapsed():
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(im_coll_data['median'], cmap='gray_r')
ax1.set_title(r'\textbf{galaxy: median}', fontsize=13)
ax1.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax1.set_ylabel(r'\textbf{Pixels}', fontsize=13)
ax2.imshow(im_coll_data['sum'], cmap='gray_r')
ax2.set_title(r'\textbf{galaxy: sum}', fontsize=13)
ax2.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax2.set_ylabel(r'\textbf{Pixels}', fontsize=13)
f.subplots_adjust(wspace=0.4)
f.savefig(data_dir + "/" + stk_f_n + '_collapsed_images.pdf')
snw_data = sky_noise_weighting(file_name, sky_file_name)
df_data = otwo_doublet_fitting(file_name, sky_file_name)
# --- spectra ---
def graphs_spectra():
f, (ax1, ax2) = plt.subplots(2, 1)
# --- redshifted data plotting
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
## plotting our cube data
cbs_y = gs_data['gd_shifted']
ax1.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
snd_y = snw_data['sky_regions'][:,1]
ax1.plot(cbd_x, snd_y, linewidth=0.5, color="#f44336", alpha=0.5)
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
## plotting our [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
ax1.plot(ot_x, ot_y, linewidth=0.5, color="#00c853")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
ax1.plot(std_x, std_y, linewidth=0.5, color="#00acc1")
## plotting peak lines for scipy finder and peakutils finder
#pk_lines = gs_data['gd_peaks']
#for i in range(len(pk_lines)):
#srb = sr['begin']
#ax1.axvline(x=srb+pk_lines[i], linewidth=0.5, color="#8bc34a", alpha=0.2)
pu_lines = gs_data['pu_peaks']
for i in range(len(pu_lines)):
srb = sr['begin']
ax1.axvline(x=(pu_lines[i]), linewidth=0.5, color="#ec407a", alpha=0.2)
ax1.set_title(r'\textbf{spectra: cross-section redshifted}', fontsize=13)
ax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax1.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax1.set_ylim([-1000,5000]) # setting manual limits for now
# --- corrected redshift
crs_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
rdst = gs_data['redshift']
sp_lines = gs_data['spectra']
## corrected wavelengths
corr_x = crs_x / (1+rdst)
## plotting our cube data
cps_y = gs_data['gd_shifted']
ax2.plot(corr_x, cps_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
sn_y = gs_data['sky_noise']
ax2.plot(corr_x, sn_y, linewidth=0.5, color="#e53935")
## plotting spectra lines
for e_key, e_val in sp_lines['emis'].items():
spec_line = float(e_val)
ax2.axvline(x=spec_line, linewidth=0.5, color="#00c853")
ax2.text(spec_line-10, 4800, e_key, rotation=-90)
ax2.set_title(r'\textbf{spectra: cross-section corrected}', fontsize=13)
ax2.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax2.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax2.set_ylim([-500,5000]) # setting manual limits for now
f.subplots_adjust(hspace=0.5)
f.savefig(data_dir + "/" + stk_f_n + '_spectra.pdf')
# saving our plotting into npy files so they can be used elsewhere
np.save(data_dir + "/" + stk_f_n + "_cbd_x", cbd_x)
np.save(data_dir + "/" + stk_f_n + "_cbs_y", cbs_y)
np.save(data_dir + "/" + stk_f_n + "_snd_y", snd_y)
np.save(data_dir + "/" + stk_f_n + "_corr_x", corr_x)
np.save(data_dir + "/" + stk_f_n + "_cps_y", cps_y)
def graphs_otwo_region():
ot_fig = plt.figure(6)
# plotting the data for the cutout [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
plt.plot(ot_x, ot_y, linewidth=1.5, color="#000000")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
#plt.plot(std_x, std_y, linewidth=1.5, color="#00acc1")
dblt_rng = df_data['doublet_range']
ot_x_b, ot_x_e = dblt_rng[0], dblt_rng[-1]
x_ax_vals = np.linspace(ot_x_b, ot_x_e, 1000)
# lmfit
lm_init = df_data['lm_init_fit']
lm_best = df_data['lm_best_fit']
plt.plot(ot_x, lm_best, linewidth=1.5, color="#1e88e5",
label=r"\textbf{Best fit}")
plt.plot(ot_x, lm_init, linewidth=1.5, color="#43a047", alpha=0.5,
label=r"\textbf{Initial guess}")
lm_params = df_data['lm_best_param']
lm_params = [prm_value for prm_key, prm_value in lm_params.items()]
c, i_val1, i_val2, sig_g, rdsh, sig_i = lm_params
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths for OII
l1 = dblt_mu[0] * (1+rdsh)
l2 = dblt_mu[1] * (1+rdsh)
sig = np.sqrt(sig_g**2 + sig_i**2)
norm = (sig*np.sqrt(2*np.pi))
lm_y1 = c + ( i_val1 / norm ) * np.exp(-(ot_x-l1)**2/(2*sig**2))
lm_y2 = c + ( i_val2 / norm ) * np.exp(-(ot_x-l2)**2/(2*sig**2))
plt.plot(ot_x, lm_y1, linewidth=1.5, color="#e64a19", alpha=0.7,
label=r"\textbf{Gaussian 1}")
plt.plot(ot_x, lm_y2, linewidth=1.5, color="#1a237e", alpha=0.7,
label=r"\textbf{Gaussian 2}")
# plotting signal-to-noise straight line and gaussian to verify it works
sn_line = df_data['sn_line']
sn_gauss = df_data['sn_gauss']
#plt.axhline(y=sn_line, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.plot(ot_x, sn_gauss, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.title(r'\textbf{[OII] region}', fontsize=13)
plt.legend(loc='upper left', prop={'size': 15})
plt.tick_params(labelsize=20)
plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
plt.ylabel(r'\textbf{Flux}', fontsize=20)
plt.xlim([l1-100,np.max(ot_x)])
plt.ylim([-100,np.max(ot_y)+100]) # setting manual limits for now
plt.savefig(data_dir + "/" + stk_f_n + '_otwo_region.pdf',bbox_inches="tight")
graph_indiv()
graphs_collapsed()
graphs_spectra()
graphs_otwo_region()
plt.close("all")
return {'image_data': im_coll_data, 'spectra_data': spectra_data, 'sr': sr,
'df_data': df_data, 'gs_data': gs_data, 'snw_data': snw_data}
if __name__ == '__main__':
analysis("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_1068.fits", "data/skyvariance_csub.fits")
| s.mkdir(data_dir)
| conditional_block |
cube_reader.py | import os
import re
import file_writer
import numpy as np
from numpy import unravel_index
import datetime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy import signal
import peakutils
from lmfit import minimize, Parameters, Model
from astropy.io import fits
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
def read_file(file_name):
""" reads file_name and returns specific header data and image data """
fits_file = fits.open(file_name)
header = fits_file[0].header
image_data = fits_file[1].data
segmentation_data = fits_file[2].data
header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}
# clause to differentiate between CDELT3 and CD3_3
for hdr_key, hdr_value in header_keywords.items():
# finding required header values
hdr_value = header[hdr_key]
header_keywords[hdr_key] = hdr_value
return header_keywords, image_data, segmentation_data
def wavelength_solution(file_name):
""" wavelength solution in Angstroms """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
range_begin = header_data['CRVAL3']
pixel_begin = header_data['CRPIX3']
step_size = header_data['CD3_3']
steps = len(image_data)
range_end = range_begin + steps * step_size
return {'begin': range_begin, 'end': range_end, 'steps': steps}
def image_collapser(file_name):
""" collapses image data so it can be passed as a heatmap """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
image_median = np.zeros((ra_axis, dec_axis))
image_sum = np.zeros((ra_axis, dec_axis))
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
pd_median = np.nanmedian(pixel_data)
pd_sum = np.nansum(pixel_data)
image_median[i_ra][i_dec] = pd_median
image_sum[i_ra][i_dec] = pd_sum
return {'median': image_median, 'sum': image_sum}
def spectrum_creator(file_name):
""" creating a spectra from the area as defined in the segementation area """
file_data = read_file(file_name)
image_data = file_data[1]
segmentation_data = file_data[2]
collapsed_data = image_collapser(file_name)
# spectrum for central pixel
cp_bright = []
for key, data in collapsed_data.items():
lgst_val = data.argmax()
lgst_loc = unravel_index(data.argmax(), data.shape)
cp_bright.append(lgst_loc)
cp_loc = 0
if ( cp_bright[0] == cp_bright[1] ):
cp_loc = cp_bright[0]
else:
cp_loc = cp_bright[1]
cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]
# spectrum as defined by the segmentation area
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = [int(x) for x in re.findall('\d+', stk_f_n)][0]
# locating where the galaxy pixels are from the cube_id
seg_curr_cube = np.where(segmentation_data == cube_id)
scc_rows, scc_cols = seg_curr_cube
#np.set_printoptions(threshold=np.nan)
#print(segmentation_data)
collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])
for i_r in range(len(scc_rows)):
# I want to pull out each pixel and store it into the collapsed spectrum array
collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]
galaxy_spectrum = np.zeros(np.shape(image_data)[0])
for i_ax in range(len(galaxy_spectrum)):
galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])
return {'central': cp_spec_data, 'galaxy': galaxy_spectrum,
'segmentation': segmentation_data}
def cube_noise(cube_id):
cube_file_name = ("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_" + str(cube_id) + ".fits")
cube_file = read_file(cube_file_name)
image_data = cube_file[1]
collapsed_data = spectrum_creator(cube_file_name)
segmentation_data = cube_file[2]
pixels_data = np.where(segmentation_data == cube_id)
pixels_noise = np.where(segmentation_data == 0)
pn_rows, pn_cols = pixels_noise
pd_num = np.shape(pixels_data)[1] # number of pixels so that a ratio can be
pn_num = np.shape(pixels_noise)[1] # calculated
# calculating the noise based off the segmentation data
noise_spectra = np.zeros([np.shape(image_data)[0], len(pn_rows)])
for i_noise in range(len(pn_rows)):
noise_spectra[:,i_noise] = image_data[:,pn_rows[i_noise],pn_cols[i_noise]]
nr_noise = np.zeros(np.shape(noise_spectra)[0])
for i_ax in range(np.shape(noise_spectra)[0]):
nr_noise[i_ax] = np.nansum(noise_spectra[i_ax])
noise = np.median(nr_noise) * np.sqrt(pd_num**2/pn_num**2)
return {'noise_data': nr_noise, 'noise_value': noise, 'pd_num': pd_num,
'pn_num': pn_num}
def spectra_stacker(file_name):
""" stacking all spectra together for a stacked spectra image """
file_data = read_file(file_name)
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
pxl_total = ra_axis * dec_axis
data_unwrap = []
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
data_unwrap.append(pixel_data)
data_stacked = np.zeros((pxl_total, wl_axis))
for i_row in range(np.shape(data_unwrap)[0]):
data_row = data_unwrap[i_row]
for i_pixel in range(len(data_row)):
data_stacked[i_row][i_pixel] = data_row[i_pixel]
# writing data to a fits file
hdr = fits.Header()
hdr['CTYPE1'] = 'pixel'
hdr['CRPIX1'] = 1
hdr['CRVAL1'] = data_stacked[0][0]
hdr['CDELT1'] = data_stacked[0][1] - data_stacked[0][0]
primary_hdu = fits.PrimaryHDU(header=hdr)
hdu = fits.ImageHDU(data_stacked)
hdul = fits.HDUList([primary_hdu, hdu])
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
hdul.writeto(data_dir + '/stacked.fits')
return data_unwrap
def sky_noise(sky_file_name):
""" returning sky noise data files """
fits_file = fits.open(sky_file_name)
image_data = fits_file[0].data
return image_data
def spectra_analysis(file_name, sky_file_name):
""" correcting data to be in rest frame """
# read file name and select out the id that we are dealing with
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = int(re.search(r'\d+', stk_f_n).group())
# read catalogue and obtain the HST redshift estimate
#catalogue = np.load("data/matched_catalogue.npy")
catalogue = np.load("data/low_redshift_catalogue.npy")
cat_loc = np.where(catalogue[:,0] == cube_id)[0]
cube_info = catalogue[cat_loc][0]
hst_redshift = cube_info[7]
# spectra and sky noise data
spectra_data = spectrum_creator(file_name)
wl_soln = wavelength_solution(file_name)
sn_data = sky_noise(sky_file_name)
galaxy_data = spectra_data['galaxy']
# removing baseline from data
base = peakutils.baseline(galaxy_data, 3)
gd_mc = galaxy_data - base
# scaling sky-noise to be similar to spectra data
gd_max = np.amax(galaxy_data)
sn_data_max = np.amax(sn_data)
sn_scale = gd_max / sn_data_max
sn_data = sn_data * sn_scale
# spectra lines
sl = {
'emis': {
'[OII]': '3727',
'CaK': '3933',
'CaH': '3968',
'Hdelta': '4101',
},
'abs': {'K': '3934.777',
}
}
# we can use the redshift from the HST catalogue to define the region to search for
# the doublet in
# lower and upper bound on wavelength range
lower_lambda = (1+hst_redshift)*3600
upper_lambda = (1+hst_redshift)*3850
# x-axis data
data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda)
lambda_data = data_h_range[mask]
flux_data = gd_mc[mask]
# Finding peaks with PeakUtils
pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)
pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)
pu_peaks_x = np.sort(pu_peaks_x)
pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]
pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')
peaks_file.write("Peaks found on " + str(datetime.datetime.now()) + "\n\n")
peaks_file.write("Number Wavelength \n")
for i_peak in range(len(pu_peaks_x)):
curr_peak = pu_peaks_x[i_peak]
peaks_file.write(str(i_peak) + " " + str(curr_peak) + "\n")
# manually selecting which peak is the [OII] peak - given in wavelength
if (pu_peaks_x.size != 0):
otwo_wav = float(pu_peaks_x[0])
otwo_acc = float(sl['emis']['[OII]'])
redshift = (otwo_wav / otwo_acc) - 1
else:
# accepting HST redshift if cannot find peak
redshift = hst_redshift
return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift':
redshift, 'pu_peaks': pu_peaks_x}
def find_nearest(array, value):
""" Find nearest value is an array """
idx = (np.abs(array-value)).argmin()
return idx
def sn_line(x, c):
return c
def sn_gauss(x, c, i1, mu, sigma1):
norm = (sigma1*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-mu)**2/(2*sigma1**2))
return (c + term1)
def chisq(y_model, y_data, y_err):
csq = (y_data-y_model)**2 / y_err**2
csq = np.sum(csq)
red_csq = csq / (len(y_data) - 4)
return {'chisq': csq, 'chisq_red': red_csq}
def sky_noise_weighting(file_name, sky_file_name):
""" finding the sky noise from a small section of the cube data """
cs_data = spectra_analysis(file_name, sky_file_name)
cube_data = cs_data['gd_shifted']
sn_data = cs_data['sky_noise']
wl_soln = wavelength_solution(file_name)
sn_data_min = np.min(sn_data)
in_wt = 1 / (sn_data - sn_data_min + 1)
sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise
for i in range(len(in_wt)):
data_acl = cube_data[i]
data_sky = sn_data[i]
data_prb = in_wt[i]
if ( 0.00 <= np.abs(data_prb) <= 1.00 ):
sky_regns[i][0] = data_prb
sky_regns[i][1] = data_sky
# finding max peak in the sky-noise data and fitting a Gaussian to that
# x-axis data
x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
# Finding peaks with PeakUtils
sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)
sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)
if (sky_peaks_x.size != 0):
sky_peak = sky_peaks_x[0]
sky_peak_index = find_nearest(sky_peak, x_range)
else:
sky_peak = 6000
sky_peak_index = 0
sky_peak_loc = x_range[sky_peak_index]
sky_peak_range = [sky_peak-100, sky_peak+100]
sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]
sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_gauss_params = Parameters()
sky_gauss_params.add('c', value=0)
sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)
sky_gauss_params.add('mu', value=sky_peak_loc)
sky_gauss_params.add('sigma1', value=3)
sky_gauss_model = Model(sn_gauss)
sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x,
params=sky_gauss_params)
sky_gauss_best = sky_gauss_rslt.best_values
sky_sigma = sky_gauss_best['sigma1']
return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}
def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst):
""" function for Gaussian doublet """
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
l1 = dblt_mu[0] * (1+z)
l2 = dblt_mu[1] * (1+z)
sigma = np.sqrt(sigma_gal**2 + sigma_inst**2)
norm = (sigma*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-l1)**2/(2*sigma**2))
term2 = ( i2 / norm ) * np.exp(-(x-l2)**2/(2*sigma**2))
return (c*x + term1 + term2)
def otwo_doublet_fitting(file_name, sky_file_name):
sa_data = spectra_analysis(file_name, sky_file_name)
y_shifted = sa_data['gd_shifted']
orr = wavelength_solution(file_name)
sn_data = sky_noise_weighting(file_name, sky_file_name)
redshift = sa_data['redshift']
# obtaining the OII range and region
# lower and upper bound on wavelength range
lower_lambda = (1+redshift)*3600
upper_lambda = (1+redshift)*3750
otr = [lower_lambda, upper_lambda]
print(otr)
orr_x = np.linspace(orr['begin'], orr['end'], orr['steps'])
dt_region = [find_nearest(orr_x, x) for x in otr]
otwo_region = y_shifted[dt_region[0]:dt_region[1]]
print(orr_x)
ot_x = orr_x[dt_region[0]:dt_region[1]]
otwo_max_loc = np.argmax(otwo_region)
otwo_max_val = np.max(otwo_region)
# standard deviation of a range before the peak
stdr_b = 50
stdr_e = otwo_max_loc - 50
stddev_lim = [stdr_b, stdr_e]
stddev_x = ot_x[stddev_lim[0]:stddev_lim[1]]
stddev_region = otwo_region[stddev_lim[0]:stddev_lim[1]]
stddev_val = np.std(stddev_region)
# fitting a gaussian doublet model to the data
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
dblt_val = ot_x[otwo_max_loc]
dblt_rng = [dblt_val-20, dblt_val+20]
dblt_rng = [find_nearest(orr_x, x) for x in dblt_rng]
dblt_rng_vals = orr_x[dblt_rng[0]:dblt_rng[1]]
dblt_rgn = y_shifted[dblt_rng[0]:dblt_rng[1]]
rdst = sa_data['redshift']
sky_weight = sn_data['inverse_sky']
sky_weight = sky_weight[dt_region[0]:dt_region[1]]
# the parameters we need are (c, i1, i2, sigma1, z)
p0 = [0, otwo_max_val, 1.3, 3, rdst]
c, i_val1, r, sigma_gal, z = p0
sigma_sky = sn_data['sky_sigma']
gss_pars = Parameters()
gss_pars.add('c', value=c)
gss_pars.add('i1', value=i_val1, min=0.0)
gss_pars.add('r', value=r, min=0.5, max=1.5)
gss_pars.add('i2', expr='i1/r', min=0.0)
gss_pars.add('sigma_gal', value=sigma_gal)
gss_pars.add('z', value=z)
gss_pars.add('sigma_inst', value=sigma_sky, vary=False)
gss_model = Model(f_doublet)
gss_result = gss_model.fit(otwo_region, x=ot_x, params=gss_pars,
weights=sky_weight)
opti_pms = gss_result.best_values
init_pms = gss_result.init_values
# working out signal to noise now
sn_line_parms = Parameters()
sn_line_parms.add('c', value=c)
sn_line_model = Model(sn_line)
sn_line_rslt = sn_line_model.fit(otwo_region, x=ot_x, params=sn_line_parms)
sn_line_bpms = sn_line_rslt.best_values
sn_line_data = sn_line_rslt.best_fit
sn_gauss_parms = Parameters()
sn_gauss_parms.add('c', value=c)
sn_gauss_parms.add('i1', value=i_val1, min=0.0)
sn_gauss_parms.add('mu', value=dblt_val)
sn_gauss_parms.add('sigma1', value=sigma_gal)
sn_gauss_model = Model(sn_gauss)
sn_gauss_rslt = sn_gauss_model.fit(otwo_region, x=ot_x, params=sn_gauss_parms)
sn_gauss_bpms = sn_gauss_rslt.best_values
sn_gauss_data = sn_gauss_rslt.best_fit
sn_line_csqs = chisq(sn_line_data, otwo_region, stddev_val)
sn_gauss_csqs = chisq(sn_gauss_data, otwo_region, stddev_val)
signal_noise = np.sqrt(sn_line_csqs['chisq'] - sn_gauss_csqs['chisq'])
# saving data to text files
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
file_writer.analysis_complete(data_dir, stk_f_n, gss_result, init_pms, opti_pms,
sn_line_csqs, sn_gauss_csqs, signal_noise, sn_line_bpms, sn_line_data,
sn_gauss_bpms, sn_gauss_data)
return {'range': otr, 'x_region': ot_x,'y_region': otwo_region, 'doublet_range':
dblt_rng_vals, 'std_x': stddev_x, 'std_y': stddev_region, 'lm_best_fit':
gss_result.best_fit, 'lm_best_param': gss_result.best_values,
'lm_init_fit': gss_result.init_fit, 'sn_line': sn_line_rslt.best_fit,
'sn_gauss': sn_gauss_rslt.best_fit}
def analysis(file_name, sky_file_name):
""" Graphs and results from analysing the cube for OII spectra """
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams['text.latex.preamble'] = [r'\boldmath']
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
spectra_stacker(file_name)
# one figure to rule them all
main_fig = plt.figure(1)
# calling data once will be enough
im_coll_data = image_collapser(file_name)
spectra_data = spectrum_creator(file_name)
sr = wavelength_solution(file_name)
gs_data = spectra_analysis(file_name, sky_file_name)
def graph_indiv():
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
cbs_y = gs_data['gd_shifted']
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
# --- for collapsed images ---
def graphs_collapsed():
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(im_coll_data['median'], cmap='gray_r')
ax1.set_title(r'\textbf{galaxy: median}', fontsize=13)
ax1.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax1.set_ylabel(r'\textbf{Pixels}', fontsize=13)
ax2.imshow(im_coll_data['sum'], cmap='gray_r')
ax2.set_title(r'\textbf{galaxy: sum}', fontsize=13)
ax2.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax2.set_ylabel(r'\textbf{Pixels}', fontsize=13)
f.subplots_adjust(wspace=0.4)
f.savefig(data_dir + "/" + stk_f_n + '_collapsed_images.pdf')
snw_data = sky_noise_weighting(file_name, sky_file_name)
df_data = otwo_doublet_fitting(file_name, sky_file_name)
# --- spectra ---
def graphs_spectra():
f, (ax1, ax2) = plt.subplots(2, 1)
# --- redshifted data plotting
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
## plotting our cube data
cbs_y = gs_data['gd_shifted']
ax1.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
snd_y = snw_data['sky_regions'][:,1]
ax1.plot(cbd_x, snd_y, linewidth=0.5, color="#f44336", alpha=0.5)
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
## plotting our [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
ax1.plot(ot_x, ot_y, linewidth=0.5, color="#00c853")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
ax1.plot(std_x, std_y, linewidth=0.5, color="#00acc1")
## plotting peak lines for scipy finder and peakutils finder
#pk_lines = gs_data['gd_peaks']
#for i in range(len(pk_lines)):
#srb = sr['begin']
#ax1.axvline(x=srb+pk_lines[i], linewidth=0.5, color="#8bc34a", alpha=0.2)
pu_lines = gs_data['pu_peaks']
for i in range(len(pu_lines)):
srb = sr['begin']
ax1.axvline(x=(pu_lines[i]), linewidth=0.5, color="#ec407a", alpha=0.2)
ax1.set_title(r'\textbf{spectra: cross-section redshifted}', fontsize=13)
ax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax1.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax1.set_ylim([-1000,5000]) # setting manual limits for now
# --- corrected redshift
crs_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
rdst = gs_data['redshift']
sp_lines = gs_data['spectra']
## corrected wavelengths
corr_x = crs_x / (1+rdst)
## plotting our cube data
cps_y = gs_data['gd_shifted']
ax2.plot(corr_x, cps_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
sn_y = gs_data['sky_noise']
ax2.plot(corr_x, sn_y, linewidth=0.5, color="#e53935")
## plotting spectra lines
for e_key, e_val in sp_lines['emis'].items():
spec_line = float(e_val)
ax2.axvline(x=spec_line, linewidth=0.5, color="#00c853")
ax2.text(spec_line-10, 4800, e_key, rotation=-90)
ax2.set_title(r'\textbf{spectra: cross-section corrected}', fontsize=13)
ax2.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax2.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax2.set_ylim([-500,5000]) # setting manual limits for now
f.subplots_adjust(hspace=0.5)
f.savefig(data_dir + "/" + stk_f_n + '_spectra.pdf')
# saving our plotting into npy files so they can be used elsewhere
np.save(data_dir + "/" + stk_f_n + "_cbd_x", cbd_x)
np.save(data_dir + "/" + stk_f_n + "_cbs_y", cbs_y)
np.save(data_dir + "/" + stk_f_n + "_snd_y", snd_y)
np.save(data_dir + "/" + stk_f_n + "_corr_x", corr_x)
np.save(data_dir + "/" + stk_f_n + "_cps_y", cps_y)
def gra |
ot_fig = plt.figure(6)
# plotting the data for the cutout [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
plt.plot(ot_x, ot_y, linewidth=1.5, color="#000000")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
#plt.plot(std_x, std_y, linewidth=1.5, color="#00acc1")
dblt_rng = df_data['doublet_range']
ot_x_b, ot_x_e = dblt_rng[0], dblt_rng[-1]
x_ax_vals = np.linspace(ot_x_b, ot_x_e, 1000)
# lmfit
lm_init = df_data['lm_init_fit']
lm_best = df_data['lm_best_fit']
plt.plot(ot_x, lm_best, linewidth=1.5, color="#1e88e5",
label=r"\textbf{Best fit}")
plt.plot(ot_x, lm_init, linewidth=1.5, color="#43a047", alpha=0.5,
label=r"\textbf{Initial guess}")
lm_params = df_data['lm_best_param']
lm_params = [prm_value for prm_key, prm_value in lm_params.items()]
c, i_val1, i_val2, sig_g, rdsh, sig_i = lm_params
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths for OII
l1 = dblt_mu[0] * (1+rdsh)
l2 = dblt_mu[1] * (1+rdsh)
sig = np.sqrt(sig_g**2 + sig_i**2)
norm = (sig*np.sqrt(2*np.pi))
lm_y1 = c + ( i_val1 / norm ) * np.exp(-(ot_x-l1)**2/(2*sig**2))
lm_y2 = c + ( i_val2 / norm ) * np.exp(-(ot_x-l2)**2/(2*sig**2))
plt.plot(ot_x, lm_y1, linewidth=1.5, color="#e64a19", alpha=0.7,
label=r"\textbf{Gaussian 1}")
plt.plot(ot_x, lm_y2, linewidth=1.5, color="#1a237e", alpha=0.7,
label=r"\textbf{Gaussian 2}")
# plotting signal-to-noise straight line and gaussian to verify it works
sn_line = df_data['sn_line']
sn_gauss = df_data['sn_gauss']
#plt.axhline(y=sn_line, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.plot(ot_x, sn_gauss, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.title(r'\textbf{[OII] region}', fontsize=13)
plt.legend(loc='upper left', prop={'size': 15})
plt.tick_params(labelsize=20)
plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
plt.ylabel(r'\textbf{Flux}', fontsize=20)
plt.xlim([l1-100,np.max(ot_x)])
plt.ylim([-100,np.max(ot_y)+100]) # setting manual limits for now
plt.savefig(data_dir + "/" + stk_f_n + '_otwo_region.pdf',bbox_inches="tight")
graph_indiv()
graphs_collapsed()
graphs_spectra()
graphs_otwo_region()
plt.close("all")
return {'image_data': im_coll_data, 'spectra_data': spectra_data, 'sr': sr,
'df_data': df_data, 'gs_data': gs_data, 'snw_data': snw_data}
if __name__ == '__main__':
analysis("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_1068.fits", "data/skyvariance_csub.fits")
| phs_otwo_region(): | identifier_name |
cube_reader.py | import os
import re
import file_writer
import numpy as np
from numpy import unravel_index
import datetime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy import signal
import peakutils
from lmfit import minimize, Parameters, Model
from astropy.io import fits
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
def read_file(file_name):
""" reads file_name and returns specific header data and image data """
fits_file = fits.open(file_name)
header = fits_file[0].header
image_data = fits_file[1].data
segmentation_data = fits_file[2].data
header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}
# clause to differentiate between CDELT3 and CD3_3
for hdr_key, hdr_value in header_keywords.items():
# finding required header values
hdr_value = header[hdr_key]
header_keywords[hdr_key] = hdr_value
return header_keywords, image_data, segmentation_data
def wavelength_solution(file_name):
""" wavelength solution in Angstroms """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
range_begin = header_data['CRVAL3']
pixel_begin = header_data['CRPIX3']
step_size = header_data['CD3_3']
steps = len(image_data)
range_end = range_begin + steps * step_size
return {'begin': range_begin, 'end': range_end, 'steps': steps}
def image_collapser(file_name):
|
def spectrum_creator(file_name):
""" creating a spectra from the area as defined in the segementation area """
file_data = read_file(file_name)
image_data = file_data[1]
segmentation_data = file_data[2]
collapsed_data = image_collapser(file_name)
# spectrum for central pixel
cp_bright = []
for key, data in collapsed_data.items():
lgst_val = data.argmax()
lgst_loc = unravel_index(data.argmax(), data.shape)
cp_bright.append(lgst_loc)
cp_loc = 0
if ( cp_bright[0] == cp_bright[1] ):
cp_loc = cp_bright[0]
else:
cp_loc = cp_bright[1]
cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]
# spectrum as defined by the segmentation area
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = [int(x) for x in re.findall('\d+', stk_f_n)][0]
# locating where the galaxy pixels are from the cube_id
seg_curr_cube = np.where(segmentation_data == cube_id)
scc_rows, scc_cols = seg_curr_cube
#np.set_printoptions(threshold=np.nan)
#print(segmentation_data)
collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])
for i_r in range(len(scc_rows)):
# I want to pull out each pixel and store it into the collapsed spectrum array
collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]
galaxy_spectrum = np.zeros(np.shape(image_data)[0])
for i_ax in range(len(galaxy_spectrum)):
galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])
return {'central': cp_spec_data, 'galaxy': galaxy_spectrum,
'segmentation': segmentation_data}
def cube_noise(cube_id):
cube_file_name = ("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_" + str(cube_id) + ".fits")
cube_file = read_file(cube_file_name)
image_data = cube_file[1]
collapsed_data = spectrum_creator(cube_file_name)
segmentation_data = cube_file[2]
pixels_data = np.where(segmentation_data == cube_id)
pixels_noise = np.where(segmentation_data == 0)
pn_rows, pn_cols = pixels_noise
pd_num = np.shape(pixels_data)[1] # number of pixels so that a ratio can be
pn_num = np.shape(pixels_noise)[1] # calculated
# calculating the noise based off the segmentation data
noise_spectra = np.zeros([np.shape(image_data)[0], len(pn_rows)])
for i_noise in range(len(pn_rows)):
noise_spectra[:,i_noise] = image_data[:,pn_rows[i_noise],pn_cols[i_noise]]
nr_noise = np.zeros(np.shape(noise_spectra)[0])
for i_ax in range(np.shape(noise_spectra)[0]):
nr_noise[i_ax] = np.nansum(noise_spectra[i_ax])
noise = np.median(nr_noise) * np.sqrt(pd_num**2/pn_num**2)
return {'noise_data': nr_noise, 'noise_value': noise, 'pd_num': pd_num,
'pn_num': pn_num}
def spectra_stacker(file_name):
""" stacking all spectra together for a stacked spectra image """
file_data = read_file(file_name)
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
pxl_total = ra_axis * dec_axis
data_unwrap = []
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
data_unwrap.append(pixel_data)
data_stacked = np.zeros((pxl_total, wl_axis))
for i_row in range(np.shape(data_unwrap)[0]):
data_row = data_unwrap[i_row]
for i_pixel in range(len(data_row)):
data_stacked[i_row][i_pixel] = data_row[i_pixel]
# writing data to a fits file
hdr = fits.Header()
hdr['CTYPE1'] = 'pixel'
hdr['CRPIX1'] = 1
hdr['CRVAL1'] = data_stacked[0][0]
hdr['CDELT1'] = data_stacked[0][1] - data_stacked[0][0]
primary_hdu = fits.PrimaryHDU(header=hdr)
hdu = fits.ImageHDU(data_stacked)
hdul = fits.HDUList([primary_hdu, hdu])
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
hdul.writeto(data_dir + '/stacked.fits')
return data_unwrap
def sky_noise(sky_file_name):
""" returning sky noise data files """
fits_file = fits.open(sky_file_name)
image_data = fits_file[0].data
return image_data
def spectra_analysis(file_name, sky_file_name):
""" correcting data to be in rest frame """
# read file name and select out the id that we are dealing with
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
cube_id = int(re.search(r'\d+', stk_f_n).group())
# read catalogue and obtain the HST redshift estimate
#catalogue = np.load("data/matched_catalogue.npy")
catalogue = np.load("data/low_redshift_catalogue.npy")
cat_loc = np.where(catalogue[:,0] == cube_id)[0]
cube_info = catalogue[cat_loc][0]
hst_redshift = cube_info[7]
# spectra and sky noise data
spectra_data = spectrum_creator(file_name)
wl_soln = wavelength_solution(file_name)
sn_data = sky_noise(sky_file_name)
galaxy_data = spectra_data['galaxy']
# removing baseline from data
base = peakutils.baseline(galaxy_data, 3)
gd_mc = galaxy_data - base
# scaling sky-noise to be similar to spectra data
gd_max = np.amax(galaxy_data)
sn_data_max = np.amax(sn_data)
sn_scale = gd_max / sn_data_max
sn_data = sn_data * sn_scale
# spectra lines
sl = {
'emis': {
'[OII]': '3727',
'CaK': '3933',
'CaH': '3968',
'Hdelta': '4101',
},
'abs': {'K': '3934.777',
}
}
# we can use the redshift from the HST catalogue to define the region to search for
# the doublet in
# lower and upper bound on wavelength range
lower_lambda = (1+hst_redshift)*3600
upper_lambda = (1+hst_redshift)*3850
# x-axis data
data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda)
lambda_data = data_h_range[mask]
flux_data = gd_mc[mask]
# Finding peaks with PeakUtils
pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)
pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)
pu_peaks_x = np.sort(pu_peaks_x)
pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]
pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')
peaks_file.write("Peaks found on " + str(datetime.datetime.now()) + "\n\n")
peaks_file.write("Number Wavelength \n")
for i_peak in range(len(pu_peaks_x)):
curr_peak = pu_peaks_x[i_peak]
peaks_file.write(str(i_peak) + " " + str(curr_peak) + "\n")
# manually selecting which peak is the [OII] peak - given in wavelength
if (pu_peaks_x.size != 0):
otwo_wav = float(pu_peaks_x[0])
otwo_acc = float(sl['emis']['[OII]'])
redshift = (otwo_wav / otwo_acc) - 1
else:
# accepting HST redshift if cannot find peak
redshift = hst_redshift
return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift':
redshift, 'pu_peaks': pu_peaks_x}
def find_nearest(array, value):
""" Find nearest value is an array """
idx = (np.abs(array-value)).argmin()
return idx
def sn_line(x, c):
return c
def sn_gauss(x, c, i1, mu, sigma1):
norm = (sigma1*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-mu)**2/(2*sigma1**2))
return (c + term1)
def chisq(y_model, y_data, y_err):
csq = (y_data-y_model)**2 / y_err**2
csq = np.sum(csq)
red_csq = csq / (len(y_data) - 4)
return {'chisq': csq, 'chisq_red': red_csq}
def sky_noise_weighting(file_name, sky_file_name):
""" finding the sky noise from a small section of the cube data """
cs_data = spectra_analysis(file_name, sky_file_name)
cube_data = cs_data['gd_shifted']
sn_data = cs_data['sky_noise']
wl_soln = wavelength_solution(file_name)
sn_data_min = np.min(sn_data)
in_wt = 1 / (sn_data - sn_data_min + 1)
sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise
for i in range(len(in_wt)):
data_acl = cube_data[i]
data_sky = sn_data[i]
data_prb = in_wt[i]
if ( 0.00 <= np.abs(data_prb) <= 1.00 ):
sky_regns[i][0] = data_prb
sky_regns[i][1] = data_sky
# finding max peak in the sky-noise data and fitting a Gaussian to that
# x-axis data
x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])
# Finding peaks with PeakUtils
sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)
sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)
if (sky_peaks_x.size != 0):
sky_peak = sky_peaks_x[0]
sky_peak_index = find_nearest(sky_peak, x_range)
else:
sky_peak = 6000
sky_peak_index = 0
sky_peak_loc = x_range[sky_peak_index]
sky_peak_range = [sky_peak-100, sky_peak+100]
sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]
sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]
sky_gauss_params = Parameters()
sky_gauss_params.add('c', value=0)
sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)
sky_gauss_params.add('mu', value=sky_peak_loc)
sky_gauss_params.add('sigma1', value=3)
sky_gauss_model = Model(sn_gauss)
sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x,
params=sky_gauss_params)
sky_gauss_best = sky_gauss_rslt.best_values
sky_sigma = sky_gauss_best['sigma1']
return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}
def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst):
""" function for Gaussian doublet """
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
l1 = dblt_mu[0] * (1+z)
l2 = dblt_mu[1] * (1+z)
sigma = np.sqrt(sigma_gal**2 + sigma_inst**2)
norm = (sigma*np.sqrt(2*np.pi))
term1 = ( i1 / norm ) * np.exp(-(x-l1)**2/(2*sigma**2))
term2 = ( i2 / norm ) * np.exp(-(x-l2)**2/(2*sigma**2))
return (c*x + term1 + term2)
def otwo_doublet_fitting(file_name, sky_file_name):
sa_data = spectra_analysis(file_name, sky_file_name)
y_shifted = sa_data['gd_shifted']
orr = wavelength_solution(file_name)
sn_data = sky_noise_weighting(file_name, sky_file_name)
redshift = sa_data['redshift']
# obtaining the OII range and region
# lower and upper bound on wavelength range
lower_lambda = (1+redshift)*3600
upper_lambda = (1+redshift)*3750
otr = [lower_lambda, upper_lambda]
print(otr)
orr_x = np.linspace(orr['begin'], orr['end'], orr['steps'])
dt_region = [find_nearest(orr_x, x) for x in otr]
otwo_region = y_shifted[dt_region[0]:dt_region[1]]
print(orr_x)
ot_x = orr_x[dt_region[0]:dt_region[1]]
otwo_max_loc = np.argmax(otwo_region)
otwo_max_val = np.max(otwo_region)
# standard deviation of a range before the peak
stdr_b = 50
stdr_e = otwo_max_loc - 50
stddev_lim = [stdr_b, stdr_e]
stddev_x = ot_x[stddev_lim[0]:stddev_lim[1]]
stddev_region = otwo_region[stddev_lim[0]:stddev_lim[1]]
stddev_val = np.std(stddev_region)
# fitting a gaussian doublet model to the data
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths
dblt_val = ot_x[otwo_max_loc]
dblt_rng = [dblt_val-20, dblt_val+20]
dblt_rng = [find_nearest(orr_x, x) for x in dblt_rng]
dblt_rng_vals = orr_x[dblt_rng[0]:dblt_rng[1]]
dblt_rgn = y_shifted[dblt_rng[0]:dblt_rng[1]]
rdst = sa_data['redshift']
sky_weight = sn_data['inverse_sky']
sky_weight = sky_weight[dt_region[0]:dt_region[1]]
# the parameters we need are (c, i1, i2, sigma1, z)
p0 = [0, otwo_max_val, 1.3, 3, rdst]
c, i_val1, r, sigma_gal, z = p0
sigma_sky = sn_data['sky_sigma']
gss_pars = Parameters()
gss_pars.add('c', value=c)
gss_pars.add('i1', value=i_val1, min=0.0)
gss_pars.add('r', value=r, min=0.5, max=1.5)
gss_pars.add('i2', expr='i1/r', min=0.0)
gss_pars.add('sigma_gal', value=sigma_gal)
gss_pars.add('z', value=z)
gss_pars.add('sigma_inst', value=sigma_sky, vary=False)
gss_model = Model(f_doublet)
gss_result = gss_model.fit(otwo_region, x=ot_x, params=gss_pars,
weights=sky_weight)
opti_pms = gss_result.best_values
init_pms = gss_result.init_values
# working out signal to noise now
sn_line_parms = Parameters()
sn_line_parms.add('c', value=c)
sn_line_model = Model(sn_line)
sn_line_rslt = sn_line_model.fit(otwo_region, x=ot_x, params=sn_line_parms)
sn_line_bpms = sn_line_rslt.best_values
sn_line_data = sn_line_rslt.best_fit
sn_gauss_parms = Parameters()
sn_gauss_parms.add('c', value=c)
sn_gauss_parms.add('i1', value=i_val1, min=0.0)
sn_gauss_parms.add('mu', value=dblt_val)
sn_gauss_parms.add('sigma1', value=sigma_gal)
sn_gauss_model = Model(sn_gauss)
sn_gauss_rslt = sn_gauss_model.fit(otwo_region, x=ot_x, params=sn_gauss_parms)
sn_gauss_bpms = sn_gauss_rslt.best_values
sn_gauss_data = sn_gauss_rslt.best_fit
sn_line_csqs = chisq(sn_line_data, otwo_region, stddev_val)
sn_gauss_csqs = chisq(sn_gauss_data, otwo_region, stddev_val)
signal_noise = np.sqrt(sn_line_csqs['chisq'] - sn_gauss_csqs['chisq'])
# saving data to text files
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
file_writer.analysis_complete(data_dir, stk_f_n, gss_result, init_pms, opti_pms,
sn_line_csqs, sn_gauss_csqs, signal_noise, sn_line_bpms, sn_line_data,
sn_gauss_bpms, sn_gauss_data)
return {'range': otr, 'x_region': ot_x,'y_region': otwo_region, 'doublet_range':
dblt_rng_vals, 'std_x': stddev_x, 'std_y': stddev_region, 'lm_best_fit':
gss_result.best_fit, 'lm_best_param': gss_result.best_values,
'lm_init_fit': gss_result.init_fit, 'sn_line': sn_line_rslt.best_fit,
'sn_gauss': sn_gauss_rslt.best_fit}
def analysis(file_name, sky_file_name):
""" Graphs and results from analysing the cube for OII spectra """
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams['text.latex.preamble'] = [r'\boldmath']
curr_file_name = file_name.split('.')
curr_file_name = curr_file_name[0].split('/')
stk_f_n = curr_file_name[-1]
data_dir = 'cube_results/' + stk_f_n
if not os.path.exists(data_dir):
os.mkdir(data_dir)
spectra_stacker(file_name)
# one figure to rule them all
main_fig = plt.figure(1)
# calling data once will be enough
im_coll_data = image_collapser(file_name)
spectra_data = spectrum_creator(file_name)
sr = wavelength_solution(file_name)
gs_data = spectra_analysis(file_name, sky_file_name)
def graph_indiv():
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
cbs_y = gs_data['gd_shifted']
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
# --- for collapsed images ---
def graphs_collapsed():
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(im_coll_data['median'], cmap='gray_r')
ax1.set_title(r'\textbf{galaxy: median}', fontsize=13)
ax1.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax1.set_ylabel(r'\textbf{Pixels}', fontsize=13)
ax2.imshow(im_coll_data['sum'], cmap='gray_r')
ax2.set_title(r'\textbf{galaxy: sum}', fontsize=13)
ax2.set_xlabel(r'\textbf{Pixels}', fontsize=13)
ax2.set_ylabel(r'\textbf{Pixels}', fontsize=13)
f.subplots_adjust(wspace=0.4)
f.savefig(data_dir + "/" + stk_f_n + '_collapsed_images.pdf')
snw_data = sky_noise_weighting(file_name, sky_file_name)
df_data = otwo_doublet_fitting(file_name, sky_file_name)
# --- spectra ---
def graphs_spectra():
f, (ax1, ax2) = plt.subplots(2, 1)
# --- redshifted data plotting
cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
## plotting our cube data
cbs_y = gs_data['gd_shifted']
ax1.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
snd_y = snw_data['sky_regions'][:,1]
ax1.plot(cbd_x, snd_y, linewidth=0.5, color="#f44336", alpha=0.5)
# plotting spectra to check
fig, ax3 = plt.subplots()
ax3.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")
ax3.tick_params(labelsize=20)
ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
ax3.set_ylabel(r'\textbf{Flux}', fontsize=20)
fig.savefig(data_dir + "/" + stk_f_n + '_single_spectra.pdf',
bbox_inches="tight")
## plotting our [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
ax1.plot(ot_x, ot_y, linewidth=0.5, color="#00c853")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
ax1.plot(std_x, std_y, linewidth=0.5, color="#00acc1")
## plotting peak lines for scipy finder and peakutils finder
#pk_lines = gs_data['gd_peaks']
#for i in range(len(pk_lines)):
#srb = sr['begin']
#ax1.axvline(x=srb+pk_lines[i], linewidth=0.5, color="#8bc34a", alpha=0.2)
pu_lines = gs_data['pu_peaks']
for i in range(len(pu_lines)):
srb = sr['begin']
ax1.axvline(x=(pu_lines[i]), linewidth=0.5, color="#ec407a", alpha=0.2)
ax1.set_title(r'\textbf{spectra: cross-section redshifted}', fontsize=13)
ax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax1.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax1.set_ylim([-1000,5000]) # setting manual limits for now
# --- corrected redshift
crs_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
rdst = gs_data['redshift']
sp_lines = gs_data['spectra']
## corrected wavelengths
corr_x = crs_x / (1+rdst)
## plotting our cube data
cps_y = gs_data['gd_shifted']
ax2.plot(corr_x, cps_y, linewidth=0.5, color="#000000")
## plotting our sky noise data
sn_y = gs_data['sky_noise']
ax2.plot(corr_x, sn_y, linewidth=0.5, color="#e53935")
## plotting spectra lines
for e_key, e_val in sp_lines['emis'].items():
spec_line = float(e_val)
ax2.axvline(x=spec_line, linewidth=0.5, color="#00c853")
ax2.text(spec_line-10, 4800, e_key, rotation=-90)
ax2.set_title(r'\textbf{spectra: cross-section corrected}', fontsize=13)
ax2.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
ax2.set_ylabel(r'\textbf{Flux}', fontsize=13)
ax2.set_ylim([-500,5000]) # setting manual limits for now
f.subplots_adjust(hspace=0.5)
f.savefig(data_dir + "/" + stk_f_n + '_spectra.pdf')
# saving our plotting into npy files so they can be used elsewhere
np.save(data_dir + "/" + stk_f_n + "_cbd_x", cbd_x)
np.save(data_dir + "/" + stk_f_n + "_cbs_y", cbs_y)
np.save(data_dir + "/" + stk_f_n + "_snd_y", snd_y)
np.save(data_dir + "/" + stk_f_n + "_corr_x", corr_x)
np.save(data_dir + "/" + stk_f_n + "_cps_y", cps_y)
def graphs_otwo_region():
ot_fig = plt.figure(6)
# plotting the data for the cutout [OII] region
ot_x = df_data['x_region']
ot_y = df_data['y_region']
plt.plot(ot_x, ot_y, linewidth=1.5, color="#000000")
## plotting the standard deviation region in the [OII] section
std_x = df_data['std_x']
std_y = df_data['std_y']
#plt.plot(std_x, std_y, linewidth=1.5, color="#00acc1")
dblt_rng = df_data['doublet_range']
ot_x_b, ot_x_e = dblt_rng[0], dblt_rng[-1]
x_ax_vals = np.linspace(ot_x_b, ot_x_e, 1000)
# lmfit
lm_init = df_data['lm_init_fit']
lm_best = df_data['lm_best_fit']
plt.plot(ot_x, lm_best, linewidth=1.5, color="#1e88e5",
label=r"\textbf{Best fit}")
plt.plot(ot_x, lm_init, linewidth=1.5, color="#43a047", alpha=0.5,
label=r"\textbf{Initial guess}")
lm_params = df_data['lm_best_param']
lm_params = [prm_value for prm_key, prm_value in lm_params.items()]
c, i_val1, i_val2, sig_g, rdsh, sig_i = lm_params
dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths for OII
l1 = dblt_mu[0] * (1+rdsh)
l2 = dblt_mu[1] * (1+rdsh)
sig = np.sqrt(sig_g**2 + sig_i**2)
norm = (sig*np.sqrt(2*np.pi))
lm_y1 = c + ( i_val1 / norm ) * np.exp(-(ot_x-l1)**2/(2*sig**2))
lm_y2 = c + ( i_val2 / norm ) * np.exp(-(ot_x-l2)**2/(2*sig**2))
plt.plot(ot_x, lm_y1, linewidth=1.5, color="#e64a19", alpha=0.7,
label=r"\textbf{Gaussian 1}")
plt.plot(ot_x, lm_y2, linewidth=1.5, color="#1a237e", alpha=0.7,
label=r"\textbf{Gaussian 2}")
# plotting signal-to-noise straight line and gaussian to verify it works
sn_line = df_data['sn_line']
sn_gauss = df_data['sn_gauss']
#plt.axhline(y=sn_line, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.plot(ot_x, sn_gauss, linewidth=0.5, color="#5c6bc0", alpha=0.7)
#plt.title(r'\textbf{[OII] region}', fontsize=13)
plt.legend(loc='upper left', prop={'size': 15})
plt.tick_params(labelsize=20)
plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
plt.ylabel(r'\textbf{Flux}', fontsize=20)
plt.xlim([l1-100,np.max(ot_x)])
plt.ylim([-100,np.max(ot_y)+100]) # setting manual limits for now
plt.savefig(data_dir + "/" + stk_f_n + '_otwo_region.pdf',bbox_inches="tight")
graph_indiv()
graphs_collapsed()
graphs_spectra()
graphs_otwo_region()
plt.close("all")
return {'image_data': im_coll_data, 'spectra_data': spectra_data, 'sr': sr,
'df_data': df_data, 'gs_data': gs_data, 'snw_data': snw_data}
if __name__ == '__main__':
analysis("/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
"cube_1068.fits", "data/skyvariance_csub.fits")
| """ collapses image data so it can be passed as a heatmap """
file_data = read_file(file_name)
header_data = file_data[0]
image_data = file_data[1]
data_shape = np.shape(image_data)
ra_axis = data_shape[2]
dec_axis = data_shape[1]
wl_axis = data_shape[0]
image_median = np.zeros((ra_axis, dec_axis))
image_sum = np.zeros((ra_axis, dec_axis))
for i_ra in range(ra_axis):
for i_dec in range(dec_axis):
pixel_data = image_data[:][:,i_dec][:,i_ra]
pd_median = np.nanmedian(pixel_data)
pd_sum = np.nansum(pixel_data)
image_median[i_ra][i_dec] = pd_median
image_sum[i_ra][i_dec] = pd_sum
return {'median': image_median, 'sum': image_sum} | identifier_body |
route_import.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package data_loader
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"path"
"reflect"
"regexp"
"strings"
"github.com/getkin/kin-openapi/openapi3"
"github.com/gin-gonic/gin"
"github.com/shiningrush/droplet"
"github.com/shiningrush/droplet/data"
"github.com/shiningrush/droplet/wrapper"
wgin "github.com/shiningrush/droplet/wrapper/gin"
"github.com/apisix/manager-api/internal/conf"
"github.com/apisix/manager-api/internal/core/entity"
"github.com/apisix/manager-api/internal/core/store"
"github.com/apisix/manager-api/internal/handler"
"github.com/apisix/manager-api/internal/log"
"github.com/apisix/manager-api/internal/utils"
"github.com/apisix/manager-api/internal/utils/consts"
)
type ImportHandler struct {
routeStore *store.GenericStore
svcStore store.Interface
upstreamStore store.Interface
}
func NewImportHandler() (handler.RouteRegister, error) {
return &ImportHandler{
routeStore: store.GetStore(store.HubKeyRoute),
svcStore: store.GetStore(store.HubKeyService),
upstreamStore: store.GetStore(store.HubKeyUpstream),
}, nil
}
var regPathVar = regexp.MustCompile(`{[\w.]*}`)
var regPathRepeat = regexp.MustCompile(`-APISIX-REPEAT-URI-[\d]*`)
func (h *ImportHandler) ApplyRoute(r *gin.Engine) {
r.POST("/apisix/admin/import/routes", wgin.Wraps(h.Import,
wrapper.InputType(reflect.TypeOf(ImportInput{}))))
}
type ImportInput struct {
Force bool `auto_read:"force,query"`
FileName string `auto_read:"_file"`
FileContent []byte `auto_read:"file"`
}
func (h *ImportHandler) Import(c droplet.Context) (interface{}, error) {
input := c.Input().(*ImportInput)
Force := input.Force
// file check
suffix := path.Ext(input.FileName)
if suffix != ".json" && suffix != ".yaml" && suffix != ".yml" {
return nil, fmt.Errorf("required file type is .yaml, .yml or .json but got: %s", suffix)
}
contentLen := bytes.Count(input.FileContent, nil) - 1
if contentLen > conf.ImportSizeLimit {
log.Warnf("upload file size exceeds limit: %d", contentLen)
return nil, fmt.Errorf("the file size exceeds the limit; limit %d", conf.ImportSizeLimit)
}
swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromData(input.FileContent)
if err != nil {
return nil, err
}
if len(swagger.Paths) < 1 {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
consts.ErrImportFile
}
routes, err := OpenAPI3ToRoute(swagger)
if err != nil {
return nil, err
}
// check route
for _, route := range routes {
err := checkRouteExist(c.Context(), h.routeStore, route)
if err != nil && !Force {
log.Warnf("import duplicate: %s, route: %#v", err, route)
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf("route(uris:%v) conflict, %s", route.Uris, err)
}
if route.ServiceID != nil {
_, err := h.svcStore.Get(c.Context(), utils.InterfaceToString(route.ServiceID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "service", route.ServiceID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if route.UpstreamID != nil {
_, err := h.upstreamStore.Get(c.Context(), utils.InterfaceToString(route.UpstreamID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "upstream", route.UpstreamID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if _, err := h.routeStore.CreateCheck(route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
// create route
for _, route := range routes {
if Force && route.ID != nil {
if _, err := h.routeStore.Update(c.Context(), route, true); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("update route(uris:%v) failed: %s", route.Uris, err)
}
} else {
if _, err := h.routeStore.Create(c.Context(), route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
}
return map[string]int{
"paths": len(swagger.Paths),
"routes": len(routes),
}, nil
}
func checkRouteExist(ctx context.Context, routeStore *store.GenericStore, route *entity.Route) error {
//routeStore := store.GetStore(store.HubKeyRoute)
ret, err := routeStore.List(ctx, store.ListInput{
Predicate: func(obj interface{}) bool {
id := utils.InterfaceToString(route.ID)
item := obj.(*entity.Route)
if id != "" && id != utils.InterfaceToString(item.ID) {
return false
}
if !(item.Host == route.Host && item.URI == route.URI && utils.StringSliceEqual(item.Uris, route.Uris) &&
utils.StringSliceEqual(item.RemoteAddrs, route.RemoteAddrs) && item.RemoteAddr == route.RemoteAddr &&
utils.StringSliceEqual(item.Hosts, route.Hosts) && item.Priority == route.Priority &&
utils.ValueEqual(item.Vars, route.Vars) && item.FilterFunc == route.FilterFunc) {
return false
}
return true
},
PageSize: 0,
PageNumber: 0,
})
if err != nil {
return err
}
if len(ret.Rows) > 0 {
return consts.InvalidParam("route is duplicate")
}
return nil
}
func parseExtension(val *openapi3.Operation) (*entity.Route, error) {
routeMap := map[string]interface{}{}
for key, val := range val.Extensions {
if strings.HasPrefix(key, "x-apisix-") {
routeMap[strings.TrimPrefix(key, "x-apisix-")] = val
}
}
route := new(entity.Route)
routeJson, err := json.Marshal(routeMap)
if err != nil {
return nil, err
}
err = json.Unmarshal(routeJson, &route)
if err != nil {
return nil, err
}
return route, nil
}
type PathValue struct {
Method string
Value *openapi3.Operation
}
func mergePathValue(key string, values []PathValue, swagger *openapi3.Swagger) (map[string]*entity.Route, error) |
func OpenAPI3ToRoute(swagger *openapi3.Swagger) ([]*entity.Route, error) {
var routes []*entity.Route
paths := swagger.Paths
var upstream *entity.UpstreamDef
var err error
for k, v := range paths {
k = regPathRepeat.ReplaceAllString(k, "")
upstream = &entity.UpstreamDef{}
if up, ok := v.Extensions["x-apisix-upstream"]; ok {
err = json.Unmarshal(up.(json.RawMessage), upstream)
if err != nil {
return nil, err
}
}
var values []PathValue
if v.Get != nil {
value := PathValue{
Method: http.MethodGet,
Value: v.Get,
}
values = append(values, value)
}
if v.Post != nil {
value := PathValue{
Method: http.MethodPost,
Value: v.Post,
}
values = append(values, value)
}
if v.Head != nil {
value := PathValue{
Method: http.MethodHead,
Value: v.Head,
}
values = append(values, value)
}
if v.Put != nil {
value := PathValue{
Method: http.MethodPut,
Value: v.Put,
}
values = append(values, value)
}
if v.Patch != nil {
value := PathValue{
Method: http.MethodPatch,
Value: v.Patch,
}
values = append(values, value)
}
if v.Delete != nil {
value := PathValue{
Method: http.MethodDelete,
Value: v.Delete,
}
values = append(values, value)
}
// merge same route
tmp, err := mergePathValue(k, values, swagger)
if err != nil {
return nil, err
}
for _, route := range tmp {
routes = append(routes, route)
}
}
return routes, nil
}
func parseParameters(parameters openapi3.Parameters, plugins map[string]interface{}) {
props := make(map[string]interface{})
var required []string
for _, v := range parameters {
if v.Value.Schema != nil {
v.Value.Schema.Value.Format = ""
v.Value.Schema.Value.XML = nil
}
switch v.Value.In {
case "header":
if v.Value.Schema != nil && v.Value.Schema.Value != nil {
props[v.Value.Name] = v.Value.Schema.Value
}
if v.Value.Required {
required = append(required, v.Value.Name)
}
}
}
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
requestValidation["header_schema"] = &entity.RequestValidation{
Type: "object",
Required: required,
Properties: props,
}
plugins["request-validation"] = requestValidation
}
func parseRequestBody(requestBody *openapi3.RequestBodyRef, swagger *openapi3.Swagger, plugins map[string]interface{}) {
schema := requestBody.Value.Content
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
for _, v := range schema {
if v.Schema.Ref != "" {
s := getParameters(v.Schema.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value != nil {
if v.Schema.Value.Properties != nil {
for k1, v1 := range v.Schema.Value.Properties {
if v1.Ref != "" {
s := getParameters(v1.Ref, &swagger.Components)
v.Schema.Value.Properties[k1] = s
}
v1.Value.Format = ""
}
requestValidation["body_schema"] = &entity.RequestValidation{
Type: v.Schema.Value.Type,
Required: v.Schema.Value.Required,
Properties: v.Schema.Value.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value.Items != nil {
if v.Schema.Value.Items.Ref != "" {
s := getParameters(v.Schema.Value.Items.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
}
} else {
requestValidation["body_schema"] = &entity.RequestValidation{
Type: "object",
Required: []string{},
Properties: v.Schema.Value.Properties,
}
}
}
plugins["request-validation"] = requestValidation
}
}
func parseSecurity(security openapi3.SecurityRequirements, securitySchemes openapi3.SecuritySchemes, plugins map[string]interface{}) {
// todo: import consumers
for _, securities := range security {
for name := range securities {
if schema, ok := securitySchemes[name]; ok {
value := schema.Value
if value == nil {
continue
}
// basic auth
if value.Type == "http" && value.Scheme == "basic" {
plugins["basic-auth"] = map[string]interface{}{}
//username, ok := value.Extensions["username"]
//if !ok {
// continue
//}
//password, ok := value.Extensions["password"]
//if !ok {
// continue
//}
//plugins["basic-auth"] = map[string]interface{}{
// "username": username,
// "password": password,
//}
// jwt auth
} else if value.Type == "http" && value.Scheme == "bearer" && value.BearerFormat == "JWT" {
plugins["jwt-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//secret, ok := value.Extensions["secret"]
//if !ok {
// continue
//}
//plugins["jwt-auth"] = map[string]interface{}{
// "key": key,
// "secret": secret,
//}
// key auth
} else if value.Type == "apiKey" {
plugins["key-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//plugins["key-auth"] = map[string]interface{}{
// "key": key,
//}
}
}
}
}
}
func getRouteFromPaths(method, key string, value *openapi3.Operation, swagger *openapi3.Swagger) (*entity.Route, error) {
// transform /path/{var} to /path/*
foundStr := regPathVar.FindString(key)
if foundStr != "" {
key = strings.Split(key, foundStr)[0] + "*"
}
route, err := parseExtension(value)
if err != nil {
return nil, err
}
route.Uris = []string{key}
route.Name = value.OperationID
route.Desc = value.Summary
route.Methods = []string{method}
if route.Plugins == nil {
route.Plugins = make(map[string]interface{})
}
if value.Parameters != nil {
parseParameters(value.Parameters, route.Plugins)
}
if value.RequestBody != nil {
parseRequestBody(value.RequestBody, swagger, route.Plugins)
}
if value.Security != nil && swagger.Components.SecuritySchemes != nil {
parseSecurity(*value.Security, swagger.Components.SecuritySchemes, route.Plugins)
}
return route, nil
}
func getParameters(ref string, components *openapi3.Components) *openapi3.SchemaRef {
schemaRef := &openapi3.SchemaRef{}
arr := strings.Split(ref, "/")
if arr[0] == "#" && arr[1] == "components" && arr[2] == "schemas" {
schemaRef = components.Schemas[arr[3]]
schemaRef.Value.XML = nil
// traverse properties to find another ref
for k, v := range schemaRef.Value.Properties {
if v.Value != nil {
v.Value.XML = nil
v.Value.Format = ""
}
if v.Ref != "" {
schemaRef.Value.Properties[k] = getParameters(v.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Ref != "" {
v.Value.Items = getParameters(v.Value.Items.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Value != nil {
v.Value.Items.Value.XML = nil
v.Value.Items.Value.Format = ""
}
}
}
return schemaRef
}
| {
var parsed []PathValue
var routes = map[string]*entity.Route{}
for _, value := range values {
value.Value.OperationID = strings.Replace(value.Value.OperationID, value.Method, "", 1)
var eq = false
for _, v := range parsed {
if utils.ValueEqual(v.Value, value.Value) {
eq = true
if routes[v.Method].Methods == nil {
routes[v.Method].Methods = []string{}
}
routes[v.Method].Methods = append(routes[v.Method].Methods, value.Method)
}
}
// not equal to the previous ones
if !eq {
route, err := getRouteFromPaths(value.Method, key, value.Value, swagger)
if err != nil {
return nil, err
}
routes[value.Method] = route
parsed = append(parsed, value)
}
}
return routes, nil
} | identifier_body |
route_import.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package data_loader
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"path"
"reflect"
"regexp"
"strings"
"github.com/getkin/kin-openapi/openapi3"
"github.com/gin-gonic/gin"
"github.com/shiningrush/droplet"
"github.com/shiningrush/droplet/data"
"github.com/shiningrush/droplet/wrapper"
wgin "github.com/shiningrush/droplet/wrapper/gin"
"github.com/apisix/manager-api/internal/conf"
"github.com/apisix/manager-api/internal/core/entity"
"github.com/apisix/manager-api/internal/core/store"
"github.com/apisix/manager-api/internal/handler"
"github.com/apisix/manager-api/internal/log"
"github.com/apisix/manager-api/internal/utils"
"github.com/apisix/manager-api/internal/utils/consts"
)
type ImportHandler struct {
routeStore *store.GenericStore
svcStore store.Interface
upstreamStore store.Interface
}
func NewImportHandler() (handler.RouteRegister, error) {
return &ImportHandler{
routeStore: store.GetStore(store.HubKeyRoute),
svcStore: store.GetStore(store.HubKeyService),
upstreamStore: store.GetStore(store.HubKeyUpstream),
}, nil
}
var regPathVar = regexp.MustCompile(`{[\w.]*}`)
var regPathRepeat = regexp.MustCompile(`-APISIX-REPEAT-URI-[\d]*`)
func (h *ImportHandler) ApplyRoute(r *gin.Engine) {
r.POST("/apisix/admin/import/routes", wgin.Wraps(h.Import,
wrapper.InputType(reflect.TypeOf(ImportInput{}))))
}
type ImportInput struct {
Force bool `auto_read:"force,query"`
FileName string `auto_read:"_file"`
FileContent []byte `auto_read:"file"`
}
func (h *ImportHandler) Import(c droplet.Context) (interface{}, error) {
input := c.Input().(*ImportInput)
Force := input.Force
// file check
suffix := path.Ext(input.FileName)
if suffix != ".json" && suffix != ".yaml" && suffix != ".yml" {
return nil, fmt.Errorf("required file type is .yaml, .yml or .json but got: %s", suffix)
}
contentLen := bytes.Count(input.FileContent, nil) - 1
if contentLen > conf.ImportSizeLimit {
log.Warnf("upload file size exceeds limit: %d", contentLen)
return nil, fmt.Errorf("the file size exceeds the limit; limit %d", conf.ImportSizeLimit)
}
swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromData(input.FileContent)
if err != nil {
return nil, err
}
if len(swagger.Paths) < 1 {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
consts.ErrImportFile
}
routes, err := OpenAPI3ToRoute(swagger)
if err != nil {
return nil, err
}
// check route
for _, route := range routes {
err := checkRouteExist(c.Context(), h.routeStore, route)
if err != nil && !Force {
log.Warnf("import duplicate: %s, route: %#v", err, route)
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf("route(uris:%v) conflict, %s", route.Uris, err)
}
if route.ServiceID != nil {
_, err := h.svcStore.Get(c.Context(), utils.InterfaceToString(route.ServiceID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "service", route.ServiceID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if route.UpstreamID != nil {
_, err := h.upstreamStore.Get(c.Context(), utils.InterfaceToString(route.UpstreamID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "upstream", route.UpstreamID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if _, err := h.routeStore.CreateCheck(route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
// create route
for _, route := range routes {
if Force && route.ID != nil {
if _, err := h.routeStore.Update(c.Context(), route, true); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("update route(uris:%v) failed: %s", route.Uris, err)
}
} else {
if _, err := h.routeStore.Create(c.Context(), route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
}
return map[string]int{
"paths": len(swagger.Paths),
"routes": len(routes),
}, nil
}
func checkRouteExist(ctx context.Context, routeStore *store.GenericStore, route *entity.Route) error {
//routeStore := store.GetStore(store.HubKeyRoute)
ret, err := routeStore.List(ctx, store.ListInput{
Predicate: func(obj interface{}) bool {
id := utils.InterfaceToString(route.ID)
item := obj.(*entity.Route)
if id != "" && id != utils.InterfaceToString(item.ID) {
return false
}
if !(item.Host == route.Host && item.URI == route.URI && utils.StringSliceEqual(item.Uris, route.Uris) &&
utils.StringSliceEqual(item.RemoteAddrs, route.RemoteAddrs) && item.RemoteAddr == route.RemoteAddr &&
utils.StringSliceEqual(item.Hosts, route.Hosts) && item.Priority == route.Priority &&
utils.ValueEqual(item.Vars, route.Vars) && item.FilterFunc == route.FilterFunc) {
return false
}
return true
},
PageSize: 0,
PageNumber: 0,
})
if err != nil {
return err
}
if len(ret.Rows) > 0 {
return consts.InvalidParam("route is duplicate")
}
return nil
}
func parseExtension(val *openapi3.Operation) (*entity.Route, error) {
routeMap := map[string]interface{}{}
for key, val := range val.Extensions {
if strings.HasPrefix(key, "x-apisix-") {
routeMap[strings.TrimPrefix(key, "x-apisix-")] = val
}
}
route := new(entity.Route)
routeJson, err := json.Marshal(routeMap)
if err != nil {
return nil, err
}
err = json.Unmarshal(routeJson, &route)
if err != nil {
return nil, err
}
return route, nil
}
type PathValue struct {
Method string
Value *openapi3.Operation
}
func mergePathValue(key string, values []PathValue, swagger *openapi3.Swagger) (map[string]*entity.Route, error) {
var parsed []PathValue
var routes = map[string]*entity.Route{}
for _, value := range values {
value.Value.OperationID = strings.Replace(value.Value.OperationID, value.Method, "", 1)
var eq = false
for _, v := range parsed {
if utils.ValueEqual(v.Value, value.Value) {
eq = true
if routes[v.Method].Methods == nil {
routes[v.Method].Methods = []string{}
}
routes[v.Method].Methods = append(routes[v.Method].Methods, value.Method)
}
}
// not equal to the previous ones
if !eq {
route, err := getRouteFromPaths(value.Method, key, value.Value, swagger)
if err != nil {
return nil, err
}
routes[value.Method] = route
parsed = append(parsed, value)
}
}
return routes, nil
}
func OpenAPI3ToRoute(swagger *openapi3.Swagger) ([]*entity.Route, error) {
var routes []*entity.Route
paths := swagger.Paths
var upstream *entity.UpstreamDef
var err error
for k, v := range paths {
k = regPathRepeat.ReplaceAllString(k, "")
upstream = &entity.UpstreamDef{}
if up, ok := v.Extensions["x-apisix-upstream"]; ok {
err = json.Unmarshal(up.(json.RawMessage), upstream)
if err != nil {
return nil, err
}
}
var values []PathValue
if v.Get != nil {
value := PathValue{
Method: http.MethodGet,
Value: v.Get,
}
values = append(values, value)
}
if v.Post != nil {
value := PathValue{
Method: http.MethodPost,
Value: v.Post,
}
values = append(values, value)
}
if v.Head != nil {
value := PathValue{
Method: http.MethodHead,
Value: v.Head,
}
values = append(values, value)
}
if v.Put != nil {
value := PathValue{
Method: http.MethodPut,
Value: v.Put,
}
values = append(values, value)
}
if v.Patch != nil {
value := PathValue{
Method: http.MethodPatch,
Value: v.Patch,
}
values = append(values, value)
}
if v.Delete != nil {
value := PathValue{
Method: http.MethodDelete,
Value: v.Delete,
}
values = append(values, value)
}
// merge same route
tmp, err := mergePathValue(k, values, swagger)
if err != nil {
return nil, err
}
for _, route := range tmp {
routes = append(routes, route)
}
}
return routes, nil
}
func parseParameters(parameters openapi3.Parameters, plugins map[string]interface{}) {
props := make(map[string]interface{})
var required []string
for _, v := range parameters {
if v.Value.Schema != nil {
v.Value.Schema.Value.Format = ""
v.Value.Schema.Value.XML = nil
}
switch v.Value.In {
case "header":
if v.Value.Schema != nil && v.Value.Schema.Value != nil {
props[v.Value.Name] = v.Value.Schema.Value
}
if v.Value.Required {
required = append(required, v.Value.Name)
}
}
}
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
requestValidation["header_schema"] = &entity.RequestValidation{
Type: "object",
Required: required,
Properties: props,
}
plugins["request-validation"] = requestValidation
}
func parseRequestBody(requestBody *openapi3.RequestBodyRef, swagger *openapi3.Swagger, plugins map[string]interface{}) {
schema := requestBody.Value.Content
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
for _, v := range schema {
if v.Schema.Ref != "" {
s := getParameters(v.Schema.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value != nil {
if v.Schema.Value.Properties != nil {
for k1, v1 := range v.Schema.Value.Properties {
if v1.Ref != "" {
s := getParameters(v1.Ref, &swagger.Components)
v.Schema.Value.Properties[k1] = s
}
v1.Value.Format = ""
}
requestValidation["body_schema"] = &entity.RequestValidation{
Type: v.Schema.Value.Type,
Required: v.Schema.Value.Required,
Properties: v.Schema.Value.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value.Items != nil {
if v.Schema.Value.Items.Ref != "" {
s := getParameters(v.Schema.Value.Items.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
}
} else {
requestValidation["body_schema"] = &entity.RequestValidation{
Type: "object",
Required: []string{},
Properties: v.Schema.Value.Properties,
}
}
}
plugins["request-validation"] = requestValidation
}
}
func parseSecurity(security openapi3.SecurityRequirements, securitySchemes openapi3.SecuritySchemes, plugins map[string]interface{}) { | if schema, ok := securitySchemes[name]; ok {
value := schema.Value
if value == nil {
continue
}
// basic auth
if value.Type == "http" && value.Scheme == "basic" {
plugins["basic-auth"] = map[string]interface{}{}
//username, ok := value.Extensions["username"]
//if !ok {
// continue
//}
//password, ok := value.Extensions["password"]
//if !ok {
// continue
//}
//plugins["basic-auth"] = map[string]interface{}{
// "username": username,
// "password": password,
//}
// jwt auth
} else if value.Type == "http" && value.Scheme == "bearer" && value.BearerFormat == "JWT" {
plugins["jwt-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//secret, ok := value.Extensions["secret"]
//if !ok {
// continue
//}
//plugins["jwt-auth"] = map[string]interface{}{
// "key": key,
// "secret": secret,
//}
// key auth
} else if value.Type == "apiKey" {
plugins["key-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//plugins["key-auth"] = map[string]interface{}{
// "key": key,
//}
}
}
}
}
}
func getRouteFromPaths(method, key string, value *openapi3.Operation, swagger *openapi3.Swagger) (*entity.Route, error) {
// transform /path/{var} to /path/*
foundStr := regPathVar.FindString(key)
if foundStr != "" {
key = strings.Split(key, foundStr)[0] + "*"
}
route, err := parseExtension(value)
if err != nil {
return nil, err
}
route.Uris = []string{key}
route.Name = value.OperationID
route.Desc = value.Summary
route.Methods = []string{method}
if route.Plugins == nil {
route.Plugins = make(map[string]interface{})
}
if value.Parameters != nil {
parseParameters(value.Parameters, route.Plugins)
}
if value.RequestBody != nil {
parseRequestBody(value.RequestBody, swagger, route.Plugins)
}
if value.Security != nil && swagger.Components.SecuritySchemes != nil {
parseSecurity(*value.Security, swagger.Components.SecuritySchemes, route.Plugins)
}
return route, nil
}
func getParameters(ref string, components *openapi3.Components) *openapi3.SchemaRef {
schemaRef := &openapi3.SchemaRef{}
arr := strings.Split(ref, "/")
if arr[0] == "#" && arr[1] == "components" && arr[2] == "schemas" {
schemaRef = components.Schemas[arr[3]]
schemaRef.Value.XML = nil
// traverse properties to find another ref
for k, v := range schemaRef.Value.Properties {
if v.Value != nil {
v.Value.XML = nil
v.Value.Format = ""
}
if v.Ref != "" {
schemaRef.Value.Properties[k] = getParameters(v.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Ref != "" {
v.Value.Items = getParameters(v.Value.Items.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Value != nil {
v.Value.Items.Value.XML = nil
v.Value.Items.Value.Format = ""
}
}
}
return schemaRef
} | // todo: import consumers
for _, securities := range security {
for name := range securities { | random_line_split |
route_import.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package data_loader
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"path"
"reflect"
"regexp"
"strings"
"github.com/getkin/kin-openapi/openapi3"
"github.com/gin-gonic/gin"
"github.com/shiningrush/droplet"
"github.com/shiningrush/droplet/data"
"github.com/shiningrush/droplet/wrapper"
wgin "github.com/shiningrush/droplet/wrapper/gin"
"github.com/apisix/manager-api/internal/conf"
"github.com/apisix/manager-api/internal/core/entity"
"github.com/apisix/manager-api/internal/core/store"
"github.com/apisix/manager-api/internal/handler"
"github.com/apisix/manager-api/internal/log"
"github.com/apisix/manager-api/internal/utils"
"github.com/apisix/manager-api/internal/utils/consts"
)
type ImportHandler struct {
routeStore *store.GenericStore
svcStore store.Interface
upstreamStore store.Interface
}
func NewImportHandler() (handler.RouteRegister, error) {
return &ImportHandler{
routeStore: store.GetStore(store.HubKeyRoute),
svcStore: store.GetStore(store.HubKeyService),
upstreamStore: store.GetStore(store.HubKeyUpstream),
}, nil
}
var regPathVar = regexp.MustCompile(`{[\w.]*}`)
var regPathRepeat = regexp.MustCompile(`-APISIX-REPEAT-URI-[\d]*`)
func (h *ImportHandler) ApplyRoute(r *gin.Engine) {
r.POST("/apisix/admin/import/routes", wgin.Wraps(h.Import,
wrapper.InputType(reflect.TypeOf(ImportInput{}))))
}
type ImportInput struct {
Force bool `auto_read:"force,query"`
FileName string `auto_read:"_file"`
FileContent []byte `auto_read:"file"`
}
func (h *ImportHandler) Import(c droplet.Context) (interface{}, error) {
input := c.Input().(*ImportInput)
Force := input.Force
// file check
suffix := path.Ext(input.FileName)
if suffix != ".json" && suffix != ".yaml" && suffix != ".yml" {
return nil, fmt.Errorf("required file type is .yaml, .yml or .json but got: %s", suffix)
}
contentLen := bytes.Count(input.FileContent, nil) - 1
if contentLen > conf.ImportSizeLimit {
log.Warnf("upload file size exceeds limit: %d", contentLen)
return nil, fmt.Errorf("the file size exceeds the limit; limit %d", conf.ImportSizeLimit)
}
swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromData(input.FileContent)
if err != nil {
return nil, err
}
if len(swagger.Paths) < 1 {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
consts.ErrImportFile
}
routes, err := OpenAPI3ToRoute(swagger)
if err != nil {
return nil, err
}
// check route
for _, route := range routes {
err := checkRouteExist(c.Context(), h.routeStore, route)
if err != nil && !Force {
log.Warnf("import duplicate: %s, route: %#v", err, route)
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf("route(uris:%v) conflict, %s", route.Uris, err)
}
if route.ServiceID != nil {
_, err := h.svcStore.Get(c.Context(), utils.InterfaceToString(route.ServiceID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "service", route.ServiceID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if route.UpstreamID != nil {
_, err := h.upstreamStore.Get(c.Context(), utils.InterfaceToString(route.UpstreamID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "upstream", route.UpstreamID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if _, err := h.routeStore.CreateCheck(route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
// create route
for _, route := range routes {
if Force && route.ID != nil {
if _, err := h.routeStore.Update(c.Context(), route, true); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("update route(uris:%v) failed: %s", route.Uris, err)
}
} else {
if _, err := h.routeStore.Create(c.Context(), route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
}
return map[string]int{
"paths": len(swagger.Paths),
"routes": len(routes),
}, nil
}
func checkRouteExist(ctx context.Context, routeStore *store.GenericStore, route *entity.Route) error {
//routeStore := store.GetStore(store.HubKeyRoute)
ret, err := routeStore.List(ctx, store.ListInput{
Predicate: func(obj interface{}) bool {
id := utils.InterfaceToString(route.ID)
item := obj.(*entity.Route)
if id != "" && id != utils.InterfaceToString(item.ID) {
return false
}
if !(item.Host == route.Host && item.URI == route.URI && utils.StringSliceEqual(item.Uris, route.Uris) &&
utils.StringSliceEqual(item.RemoteAddrs, route.RemoteAddrs) && item.RemoteAddr == route.RemoteAddr &&
utils.StringSliceEqual(item.Hosts, route.Hosts) && item.Priority == route.Priority &&
utils.ValueEqual(item.Vars, route.Vars) && item.FilterFunc == route.FilterFunc) {
return false
}
return true
},
PageSize: 0,
PageNumber: 0,
})
if err != nil {
return err
}
if len(ret.Rows) > 0 {
return consts.InvalidParam("route is duplicate")
}
return nil
}
func parseExtension(val *openapi3.Operation) (*entity.Route, error) {
routeMap := map[string]interface{}{}
for key, val := range val.Extensions {
if strings.HasPrefix(key, "x-apisix-") {
routeMap[strings.TrimPrefix(key, "x-apisix-")] = val
}
}
route := new(entity.Route)
routeJson, err := json.Marshal(routeMap)
if err != nil {
return nil, err
}
err = json.Unmarshal(routeJson, &route)
if err != nil {
return nil, err
}
return route, nil
}
type PathValue struct {
Method string
Value *openapi3.Operation
}
func mergePathValue(key string, values []PathValue, swagger *openapi3.Swagger) (map[string]*entity.Route, error) {
var parsed []PathValue
var routes = map[string]*entity.Route{}
for _, value := range values {
value.Value.OperationID = strings.Replace(value.Value.OperationID, value.Method, "", 1)
var eq = false
for _, v := range parsed {
if utils.ValueEqual(v.Value, value.Value) {
eq = true
if routes[v.Method].Methods == nil {
routes[v.Method].Methods = []string{}
}
routes[v.Method].Methods = append(routes[v.Method].Methods, value.Method)
}
}
// not equal to the previous ones
if !eq {
route, err := getRouteFromPaths(value.Method, key, value.Value, swagger)
if err != nil {
return nil, err
}
routes[value.Method] = route
parsed = append(parsed, value)
}
}
return routes, nil
}
func OpenAPI3ToRoute(swagger *openapi3.Swagger) ([]*entity.Route, error) {
var routes []*entity.Route
paths := swagger.Paths
var upstream *entity.UpstreamDef
var err error
for k, v := range paths {
k = regPathRepeat.ReplaceAllString(k, "")
upstream = &entity.UpstreamDef{}
if up, ok := v.Extensions["x-apisix-upstream"]; ok {
err = json.Unmarshal(up.(json.RawMessage), upstream)
if err != nil |
}
var values []PathValue
if v.Get != nil {
value := PathValue{
Method: http.MethodGet,
Value: v.Get,
}
values = append(values, value)
}
if v.Post != nil {
value := PathValue{
Method: http.MethodPost,
Value: v.Post,
}
values = append(values, value)
}
if v.Head != nil {
value := PathValue{
Method: http.MethodHead,
Value: v.Head,
}
values = append(values, value)
}
if v.Put != nil {
value := PathValue{
Method: http.MethodPut,
Value: v.Put,
}
values = append(values, value)
}
if v.Patch != nil {
value := PathValue{
Method: http.MethodPatch,
Value: v.Patch,
}
values = append(values, value)
}
if v.Delete != nil {
value := PathValue{
Method: http.MethodDelete,
Value: v.Delete,
}
values = append(values, value)
}
// merge same route
tmp, err := mergePathValue(k, values, swagger)
if err != nil {
return nil, err
}
for _, route := range tmp {
routes = append(routes, route)
}
}
return routes, nil
}
func parseParameters(parameters openapi3.Parameters, plugins map[string]interface{}) {
props := make(map[string]interface{})
var required []string
for _, v := range parameters {
if v.Value.Schema != nil {
v.Value.Schema.Value.Format = ""
v.Value.Schema.Value.XML = nil
}
switch v.Value.In {
case "header":
if v.Value.Schema != nil && v.Value.Schema.Value != nil {
props[v.Value.Name] = v.Value.Schema.Value
}
if v.Value.Required {
required = append(required, v.Value.Name)
}
}
}
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
requestValidation["header_schema"] = &entity.RequestValidation{
Type: "object",
Required: required,
Properties: props,
}
plugins["request-validation"] = requestValidation
}
func parseRequestBody(requestBody *openapi3.RequestBodyRef, swagger *openapi3.Swagger, plugins map[string]interface{}) {
schema := requestBody.Value.Content
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
for _, v := range schema {
if v.Schema.Ref != "" {
s := getParameters(v.Schema.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value != nil {
if v.Schema.Value.Properties != nil {
for k1, v1 := range v.Schema.Value.Properties {
if v1.Ref != "" {
s := getParameters(v1.Ref, &swagger.Components)
v.Schema.Value.Properties[k1] = s
}
v1.Value.Format = ""
}
requestValidation["body_schema"] = &entity.RequestValidation{
Type: v.Schema.Value.Type,
Required: v.Schema.Value.Required,
Properties: v.Schema.Value.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value.Items != nil {
if v.Schema.Value.Items.Ref != "" {
s := getParameters(v.Schema.Value.Items.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
}
} else {
requestValidation["body_schema"] = &entity.RequestValidation{
Type: "object",
Required: []string{},
Properties: v.Schema.Value.Properties,
}
}
}
plugins["request-validation"] = requestValidation
}
}
func parseSecurity(security openapi3.SecurityRequirements, securitySchemes openapi3.SecuritySchemes, plugins map[string]interface{}) {
// todo: import consumers
for _, securities := range security {
for name := range securities {
if schema, ok := securitySchemes[name]; ok {
value := schema.Value
if value == nil {
continue
}
// basic auth
if value.Type == "http" && value.Scheme == "basic" {
plugins["basic-auth"] = map[string]interface{}{}
//username, ok := value.Extensions["username"]
//if !ok {
// continue
//}
//password, ok := value.Extensions["password"]
//if !ok {
// continue
//}
//plugins["basic-auth"] = map[string]interface{}{
// "username": username,
// "password": password,
//}
// jwt auth
} else if value.Type == "http" && value.Scheme == "bearer" && value.BearerFormat == "JWT" {
plugins["jwt-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//secret, ok := value.Extensions["secret"]
//if !ok {
// continue
//}
//plugins["jwt-auth"] = map[string]interface{}{
// "key": key,
// "secret": secret,
//}
// key auth
} else if value.Type == "apiKey" {
plugins["key-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//plugins["key-auth"] = map[string]interface{}{
// "key": key,
//}
}
}
}
}
}
func getRouteFromPaths(method, key string, value *openapi3.Operation, swagger *openapi3.Swagger) (*entity.Route, error) {
// transform /path/{var} to /path/*
foundStr := regPathVar.FindString(key)
if foundStr != "" {
key = strings.Split(key, foundStr)[0] + "*"
}
route, err := parseExtension(value)
if err != nil {
return nil, err
}
route.Uris = []string{key}
route.Name = value.OperationID
route.Desc = value.Summary
route.Methods = []string{method}
if route.Plugins == nil {
route.Plugins = make(map[string]interface{})
}
if value.Parameters != nil {
parseParameters(value.Parameters, route.Plugins)
}
if value.RequestBody != nil {
parseRequestBody(value.RequestBody, swagger, route.Plugins)
}
if value.Security != nil && swagger.Components.SecuritySchemes != nil {
parseSecurity(*value.Security, swagger.Components.SecuritySchemes, route.Plugins)
}
return route, nil
}
func getParameters(ref string, components *openapi3.Components) *openapi3.SchemaRef {
schemaRef := &openapi3.SchemaRef{}
arr := strings.Split(ref, "/")
if arr[0] == "#" && arr[1] == "components" && arr[2] == "schemas" {
schemaRef = components.Schemas[arr[3]]
schemaRef.Value.XML = nil
// traverse properties to find another ref
for k, v := range schemaRef.Value.Properties {
if v.Value != nil {
v.Value.XML = nil
v.Value.Format = ""
}
if v.Ref != "" {
schemaRef.Value.Properties[k] = getParameters(v.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Ref != "" {
v.Value.Items = getParameters(v.Value.Items.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Value != nil {
v.Value.Items.Value.XML = nil
v.Value.Items.Value.Format = ""
}
}
}
return schemaRef
}
| {
return nil, err
} | conditional_block |
route_import.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package data_loader
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"path"
"reflect"
"regexp"
"strings"
"github.com/getkin/kin-openapi/openapi3"
"github.com/gin-gonic/gin"
"github.com/shiningrush/droplet"
"github.com/shiningrush/droplet/data"
"github.com/shiningrush/droplet/wrapper"
wgin "github.com/shiningrush/droplet/wrapper/gin"
"github.com/apisix/manager-api/internal/conf"
"github.com/apisix/manager-api/internal/core/entity"
"github.com/apisix/manager-api/internal/core/store"
"github.com/apisix/manager-api/internal/handler"
"github.com/apisix/manager-api/internal/log"
"github.com/apisix/manager-api/internal/utils"
"github.com/apisix/manager-api/internal/utils/consts"
)
type ImportHandler struct {
routeStore *store.GenericStore
svcStore store.Interface
upstreamStore store.Interface
}
func NewImportHandler() (handler.RouteRegister, error) {
return &ImportHandler{
routeStore: store.GetStore(store.HubKeyRoute),
svcStore: store.GetStore(store.HubKeyService),
upstreamStore: store.GetStore(store.HubKeyUpstream),
}, nil
}
var regPathVar = regexp.MustCompile(`{[\w.]*}`)
var regPathRepeat = regexp.MustCompile(`-APISIX-REPEAT-URI-[\d]*`)
func (h *ImportHandler) ApplyRoute(r *gin.Engine) {
r.POST("/apisix/admin/import/routes", wgin.Wraps(h.Import,
wrapper.InputType(reflect.TypeOf(ImportInput{}))))
}
type ImportInput struct {
Force bool `auto_read:"force,query"`
FileName string `auto_read:"_file"`
FileContent []byte `auto_read:"file"`
}
func (h *ImportHandler) Import(c droplet.Context) (interface{}, error) {
input := c.Input().(*ImportInput)
Force := input.Force
// file check
suffix := path.Ext(input.FileName)
if suffix != ".json" && suffix != ".yaml" && suffix != ".yml" {
return nil, fmt.Errorf("required file type is .yaml, .yml or .json but got: %s", suffix)
}
contentLen := bytes.Count(input.FileContent, nil) - 1
if contentLen > conf.ImportSizeLimit {
log.Warnf("upload file size exceeds limit: %d", contentLen)
return nil, fmt.Errorf("the file size exceeds the limit; limit %d", conf.ImportSizeLimit)
}
swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromData(input.FileContent)
if err != nil {
return nil, err
}
if len(swagger.Paths) < 1 {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
consts.ErrImportFile
}
routes, err := OpenAPI3ToRoute(swagger)
if err != nil {
return nil, err
}
// check route
for _, route := range routes {
err := checkRouteExist(c.Context(), h.routeStore, route)
if err != nil && !Force {
log.Warnf("import duplicate: %s, route: %#v", err, route)
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf("route(uris:%v) conflict, %s", route.Uris, err)
}
if route.ServiceID != nil {
_, err := h.svcStore.Get(c.Context(), utils.InterfaceToString(route.ServiceID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "service", route.ServiceID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if route.UpstreamID != nil {
_, err := h.upstreamStore.Get(c.Context(), utils.InterfaceToString(route.UpstreamID))
if err != nil {
if err == data.ErrNotFound {
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest},
fmt.Errorf(consts.IDNotFound, "upstream", route.UpstreamID)
}
return &data.SpecCodeResponse{StatusCode: http.StatusBadRequest}, err
}
}
if _, err := h.routeStore.CreateCheck(route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
// create route
for _, route := range routes {
if Force && route.ID != nil {
if _, err := h.routeStore.Update(c.Context(), route, true); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("update route(uris:%v) failed: %s", route.Uris, err)
}
} else {
if _, err := h.routeStore.Create(c.Context(), route); err != nil {
return handler.SpecCodeResponse(err),
fmt.Errorf("create route(uris:%v) failed: %s", route.Uris, err)
}
}
}
return map[string]int{
"paths": len(swagger.Paths),
"routes": len(routes),
}, nil
}
func checkRouteExist(ctx context.Context, routeStore *store.GenericStore, route *entity.Route) error {
//routeStore := store.GetStore(store.HubKeyRoute)
ret, err := routeStore.List(ctx, store.ListInput{
Predicate: func(obj interface{}) bool {
id := utils.InterfaceToString(route.ID)
item := obj.(*entity.Route)
if id != "" && id != utils.InterfaceToString(item.ID) {
return false
}
if !(item.Host == route.Host && item.URI == route.URI && utils.StringSliceEqual(item.Uris, route.Uris) &&
utils.StringSliceEqual(item.RemoteAddrs, route.RemoteAddrs) && item.RemoteAddr == route.RemoteAddr &&
utils.StringSliceEqual(item.Hosts, route.Hosts) && item.Priority == route.Priority &&
utils.ValueEqual(item.Vars, route.Vars) && item.FilterFunc == route.FilterFunc) {
return false
}
return true
},
PageSize: 0,
PageNumber: 0,
})
if err != nil {
return err
}
if len(ret.Rows) > 0 {
return consts.InvalidParam("route is duplicate")
}
return nil
}
func | (val *openapi3.Operation) (*entity.Route, error) {
routeMap := map[string]interface{}{}
for key, val := range val.Extensions {
if strings.HasPrefix(key, "x-apisix-") {
routeMap[strings.TrimPrefix(key, "x-apisix-")] = val
}
}
route := new(entity.Route)
routeJson, err := json.Marshal(routeMap)
if err != nil {
return nil, err
}
err = json.Unmarshal(routeJson, &route)
if err != nil {
return nil, err
}
return route, nil
}
type PathValue struct {
Method string
Value *openapi3.Operation
}
func mergePathValue(key string, values []PathValue, swagger *openapi3.Swagger) (map[string]*entity.Route, error) {
var parsed []PathValue
var routes = map[string]*entity.Route{}
for _, value := range values {
value.Value.OperationID = strings.Replace(value.Value.OperationID, value.Method, "", 1)
var eq = false
for _, v := range parsed {
if utils.ValueEqual(v.Value, value.Value) {
eq = true
if routes[v.Method].Methods == nil {
routes[v.Method].Methods = []string{}
}
routes[v.Method].Methods = append(routes[v.Method].Methods, value.Method)
}
}
// not equal to the previous ones
if !eq {
route, err := getRouteFromPaths(value.Method, key, value.Value, swagger)
if err != nil {
return nil, err
}
routes[value.Method] = route
parsed = append(parsed, value)
}
}
return routes, nil
}
func OpenAPI3ToRoute(swagger *openapi3.Swagger) ([]*entity.Route, error) {
var routes []*entity.Route
paths := swagger.Paths
var upstream *entity.UpstreamDef
var err error
for k, v := range paths {
k = regPathRepeat.ReplaceAllString(k, "")
upstream = &entity.UpstreamDef{}
if up, ok := v.Extensions["x-apisix-upstream"]; ok {
err = json.Unmarshal(up.(json.RawMessage), upstream)
if err != nil {
return nil, err
}
}
var values []PathValue
if v.Get != nil {
value := PathValue{
Method: http.MethodGet,
Value: v.Get,
}
values = append(values, value)
}
if v.Post != nil {
value := PathValue{
Method: http.MethodPost,
Value: v.Post,
}
values = append(values, value)
}
if v.Head != nil {
value := PathValue{
Method: http.MethodHead,
Value: v.Head,
}
values = append(values, value)
}
if v.Put != nil {
value := PathValue{
Method: http.MethodPut,
Value: v.Put,
}
values = append(values, value)
}
if v.Patch != nil {
value := PathValue{
Method: http.MethodPatch,
Value: v.Patch,
}
values = append(values, value)
}
if v.Delete != nil {
value := PathValue{
Method: http.MethodDelete,
Value: v.Delete,
}
values = append(values, value)
}
// merge same route
tmp, err := mergePathValue(k, values, swagger)
if err != nil {
return nil, err
}
for _, route := range tmp {
routes = append(routes, route)
}
}
return routes, nil
}
func parseParameters(parameters openapi3.Parameters, plugins map[string]interface{}) {
props := make(map[string]interface{})
var required []string
for _, v := range parameters {
if v.Value.Schema != nil {
v.Value.Schema.Value.Format = ""
v.Value.Schema.Value.XML = nil
}
switch v.Value.In {
case "header":
if v.Value.Schema != nil && v.Value.Schema.Value != nil {
props[v.Value.Name] = v.Value.Schema.Value
}
if v.Value.Required {
required = append(required, v.Value.Name)
}
}
}
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
requestValidation["header_schema"] = &entity.RequestValidation{
Type: "object",
Required: required,
Properties: props,
}
plugins["request-validation"] = requestValidation
}
func parseRequestBody(requestBody *openapi3.RequestBodyRef, swagger *openapi3.Swagger, plugins map[string]interface{}) {
schema := requestBody.Value.Content
requestValidation := make(map[string]interface{})
if rv, ok := plugins["request-validation"]; ok {
requestValidation = rv.(map[string]interface{})
}
for _, v := range schema {
if v.Schema.Ref != "" {
s := getParameters(v.Schema.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value != nil {
if v.Schema.Value.Properties != nil {
for k1, v1 := range v.Schema.Value.Properties {
if v1.Ref != "" {
s := getParameters(v1.Ref, &swagger.Components)
v.Schema.Value.Properties[k1] = s
}
v1.Value.Format = ""
}
requestValidation["body_schema"] = &entity.RequestValidation{
Type: v.Schema.Value.Type,
Required: v.Schema.Value.Required,
Properties: v.Schema.Value.Properties,
}
plugins["request-validation"] = requestValidation
} else if v.Schema.Value.Items != nil {
if v.Schema.Value.Items.Ref != "" {
s := getParameters(v.Schema.Value.Items.Ref, &swagger.Components).Value
requestValidation["body_schema"] = &entity.RequestValidation{
Type: s.Type,
Required: s.Required,
Properties: s.Properties,
}
plugins["request-validation"] = requestValidation
}
} else {
requestValidation["body_schema"] = &entity.RequestValidation{
Type: "object",
Required: []string{},
Properties: v.Schema.Value.Properties,
}
}
}
plugins["request-validation"] = requestValidation
}
}
func parseSecurity(security openapi3.SecurityRequirements, securitySchemes openapi3.SecuritySchemes, plugins map[string]interface{}) {
// todo: import consumers
for _, securities := range security {
for name := range securities {
if schema, ok := securitySchemes[name]; ok {
value := schema.Value
if value == nil {
continue
}
// basic auth
if value.Type == "http" && value.Scheme == "basic" {
plugins["basic-auth"] = map[string]interface{}{}
//username, ok := value.Extensions["username"]
//if !ok {
// continue
//}
//password, ok := value.Extensions["password"]
//if !ok {
// continue
//}
//plugins["basic-auth"] = map[string]interface{}{
// "username": username,
// "password": password,
//}
// jwt auth
} else if value.Type == "http" && value.Scheme == "bearer" && value.BearerFormat == "JWT" {
plugins["jwt-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//secret, ok := value.Extensions["secret"]
//if !ok {
// continue
//}
//plugins["jwt-auth"] = map[string]interface{}{
// "key": key,
// "secret": secret,
//}
// key auth
} else if value.Type == "apiKey" {
plugins["key-auth"] = map[string]interface{}{}
//key, ok := value.Extensions["key"]
//if !ok {
// continue
//}
//plugins["key-auth"] = map[string]interface{}{
// "key": key,
//}
}
}
}
}
}
func getRouteFromPaths(method, key string, value *openapi3.Operation, swagger *openapi3.Swagger) (*entity.Route, error) {
// transform /path/{var} to /path/*
foundStr := regPathVar.FindString(key)
if foundStr != "" {
key = strings.Split(key, foundStr)[0] + "*"
}
route, err := parseExtension(value)
if err != nil {
return nil, err
}
route.Uris = []string{key}
route.Name = value.OperationID
route.Desc = value.Summary
route.Methods = []string{method}
if route.Plugins == nil {
route.Plugins = make(map[string]interface{})
}
if value.Parameters != nil {
parseParameters(value.Parameters, route.Plugins)
}
if value.RequestBody != nil {
parseRequestBody(value.RequestBody, swagger, route.Plugins)
}
if value.Security != nil && swagger.Components.SecuritySchemes != nil {
parseSecurity(*value.Security, swagger.Components.SecuritySchemes, route.Plugins)
}
return route, nil
}
func getParameters(ref string, components *openapi3.Components) *openapi3.SchemaRef {
schemaRef := &openapi3.SchemaRef{}
arr := strings.Split(ref, "/")
if arr[0] == "#" && arr[1] == "components" && arr[2] == "schemas" {
schemaRef = components.Schemas[arr[3]]
schemaRef.Value.XML = nil
// traverse properties to find another ref
for k, v := range schemaRef.Value.Properties {
if v.Value != nil {
v.Value.XML = nil
v.Value.Format = ""
}
if v.Ref != "" {
schemaRef.Value.Properties[k] = getParameters(v.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Ref != "" {
v.Value.Items = getParameters(v.Value.Items.Ref, components)
} else if v.Value.Items != nil && v.Value.Items.Value != nil {
v.Value.Items.Value.XML = nil
v.Value.Items.Value.Format = ""
}
}
}
return schemaRef
}
| parseExtension | identifier_name |
ml_ex_03.py | #!/usr/bin/python
import sys,os,csv
import pandas
import numpy as np
import math
from sklearn.model_selection import StratifiedKFold
from sklearn import svm as SVM
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
datFileName="secom.data"
labelsFileName="secom_labels.data"
dirPath=os.path.dirname(os.path.realpath(__file__))
classList=[]
data=[]
def load_data(fileName):
raw_data = open(fileName, 'rb')
rawData = pandas.read_csv(raw_data, delimiter=" ")
return rawData.values
def getData(rawData):
#print "\n---- Getting data from File ----"
|
def getLabels(fileName):
labelData = load_data(dirPath + "/" + fileName)
labels = labelData[:,0].clip(min=0)
return np.array(labels)
def svm_intern_folds(data_train, data_test, labelsTrain, labelsTest):
acxmax = 0
c_max=0
gamma_max=0
for c in [2**(-5), 1, 2**(5), 2**(10)]:
for gamm in [2**(-15), 2**(-10), 2**(-5), 1, 2**5]:
svm = SVM.SVC(C = c, gamma = gamm)
svm.fit(data_train, labelsTrain)
accuracy = svm.score(data_test, labelsTest)
if accuracy > acxmax:
acxmax = accuracy
c_max = c
gamma_max = gamm
return [acxmax, c_max, gamma_max]
def chooseComponentsNumber(matrix, percent):
print "\n---- PCA - Choose components number ----"
print "Variance :", percent
mat = np.matrix(matrix) * np.matrix(matrix).transpose()
U,S,V = np.linalg.svd(mat)
#print U.shape, S.shape, V.shape
s_sum_all = sum(S)
totalComponents = matrix.shape[1]
num = totalComponents
for i in range(totalComponents):
if sum(S[0:i]) / s_sum_all >= percent :
print "PCA dimension:",i ,"with variance =", sum(S[0:i]) / s_sum_all
num = i
break
return num
def applyPCA(data, numComponents):
pca = PCA(n_components=numComponents)
pcaData = pca.fit_transform(data)
return pcaData
def knn_intern_folds(data_train, data_test, labels_train, labels_test):
acxmax = 0
cores = 4
k_value = 0
for k in [1, 5, 11, 15, 21, 25]:
knn = KNeighborsClassifier(n_neighbors = k, n_jobs = cores)
knn.fit(data_train, labels_train)
accuracy = knn.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
k_value = k
return [acxmax, k]
def neural_intern_folds(data_train, data_test, labels_train, labels_test):
# 10, 20, 30 e 40 neuronios na camada escondida.
acxmax = 0
cores = 4
n_value = 0
for n in [10, 20, 30, 40]:
clf = MLPClassifier(hidden_layer_sizes=(n,), solver='lbfgs')
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
n_value = n
return [acxmax, n]
def rf_intern_folds(data_train, data_test, labels_train, labels_test):
# teste com mtry ou n_featrues = 10, 15, 20, 25 e ntrees = 100, 200, 300 e 400
acxmax = 0
n_feats = 0
n_trees = 0
for feat in [10, 15, 20, 25]:
for trees in [100, 200, 300, 400]:
clf = RandomForestClassifier (max_features = feat, n_estimators = trees)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_feats = feat
n_trees = trees
return [acxmax, n_feats, n_trees]
def gbm_intern_folds(data_train, data_test, labels_train, labels_test):
## numero de arvores = 30, 70, e 100, com learning rate de 0.1 e 0.05, e profundidade da arvore=5.
acxmax = 0
n_learn_rate = 0
n_trees = 0
depth_tree = 5
for trees in [30, 70, 100]:
for learn_rate in [0.1, 0.05]:
clf = GradientBoostingClassifier (n_estimators = trees, learning_rate = learn_rate, max_depth = depth_tree)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_trees = trees
n_learn_rate = learn_rate
return [acxmax, n_learn_rate, n_trees]
## Data preprocessing
def data_preprocess(fileName):
rawdata = load_data(dirPath + "/" + fileName)
## column mean
column_mean = np.nanmean(np.array(rawdata), axis=0)
## Nan values index
nan_indexes = np.where(np.isnan(rawdata))
## Replace Nan values
rawdata[nan_indexes] = np.take(column_mean, nan_indexes[1])
## Standarize each column individually
rawdata = (rawdata - np.mean(rawdata, axis=0)) / np.std(rawdata, axis=0)
rawdata = np.nan_to_num(rawdata)
return rawdata
def run_folds( alg, data, labels):
print "--- %s ---" % alg
final_accuracy = 0
params_final = [0.0, 0.0]
skf = StratifiedKFold(n_splits=5)
for train_index, test_index in skf.split(data, labels):
new_data_train = data[train_index]
new_data_test = data[test_index]
new_labels_train = labels[train_index]
new_labels_test = labels[test_index]
acx = 0
skf_intern = StratifiedKFold(n_splits=3)
for intern_train_index, intern_test_index in skf_intern.split(new_data_train, new_labels_train):
intern_data_train = new_data_train[intern_train_index]
intern_data_test = new_data_train[intern_test_index]
intern_labels_train = new_labels_train[intern_train_index]
intern_labels_test = new_labels_train[intern_test_index]
params = get_intern_folds (alg, intern_data_train, intern_data_test, intern_labels_train, intern_labels_test)
if params[0] > acx:
acx = params[0]
params_final[0] = params[1]
if len(params) > 2:
params_final[1] = params[2]
final_accuracy = final_accuracy + model_score(alg, params_final,
new_data_train,
new_labels_train,
new_data_test,
new_labels_test)
final_accuracy = final_accuracy / 5
print_results(alg, final_accuracy, params_final)
def model_score(alg, params, new_data_train, new_labels_train, new_data_test, new_labels_test):
if 'svm' == alg:
svm_model = SVM.SVC(C = params[0], gamma = params[1])
svm_model.fit(new_data_train, new_labels_train)
return svm_model.score(new_data_test, new_labels_test)
elif 'knn' == alg:
knn = KNeighborsClassifier(n_neighbors = params[0], n_jobs = 4)
knn.fit(new_data_train, new_labels_train)
return knn.score(new_data_test, new_labels_test)
elif 'neural' == alg:
clf = MLPClassifier(hidden_layer_sizes=(params[0],), solver='lbfgs')
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'rf' == alg:
clf = RandomForestClassifier (max_features = params[0], n_estimators = params[1])
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'gbm' == alg:
clf = GradientBoostingClassifier (learning_rate = params[0], n_estimators = params[1], max_depth = 5)
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
def get_intern_folds (alg, data_train, data_test, labels_train, labels_test):
if 'svm' == alg:
return svm_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'knn' == alg:
return knn_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'neural' == alg:
return neural_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'rf' == alg:
return rf_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'gbm' == alg:
return gbm_intern_folds(data_train, data_test, labels_train, labels_test)
def print_results(alg, final_accuracy, params):
if 'svm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final hiperparametros (C=%s, Gamma=%s)" % (params[0], params[1]) )
elif 'knn' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final K (K=%s)" % (params[0]))
elif 'neural' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Neurons=%s)" % (params[0]) )
elif 'rf' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Feats=%s, Trees=%s)" % (params[0], params[1]) )
elif 'gbm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Learn Rate=%s, Trees=%s)" % (params[0], params[1]))
def PCA_for_knn(data):
variance = 80
numComponents = chooseComponentsNumber(data, float(variance) / 100)
if numComponents == -1 : print "Invalid components number. Exit"; return
return applyPCA(data, numComponents)
def main(argv=None):
if argv is None:
arv = sys.argv
## Data pre-processing
data = data_preprocess(datFileName)
labels = getLabels(labelsFileName)
labels = np.array(list(labels[:data.shape[0]]))
## kNN , PCA com 80% da variancia
pcaData = PCA_for_knn(data)
run_folds('knn', pcaData, labels)
## SVM RBF
run_folds('svm', data, labels)
## Neural network
run_folds('neural', data, labels)
## RF
run_folds('rf', data, labels)
## GBM
run_folds('gbm', data, labels)
if __name__ == "__main__":
sys.exit(main())
| lineNum = rawData.shape[0]
colNum = rawData.shape[1]
data = np.array(rawData[0:lineNum, 0:colNum-1])
for i in range(lineNum):
classList.append(rawData[i][colNum - 1])
return [data, np.array(classList) ] | identifier_body |
ml_ex_03.py | #!/usr/bin/python
import sys,os,csv
import pandas
import numpy as np
import math
from sklearn.model_selection import StratifiedKFold
from sklearn import svm as SVM
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
datFileName="secom.data"
labelsFileName="secom_labels.data"
dirPath=os.path.dirname(os.path.realpath(__file__))
classList=[]
data=[]
def load_data(fileName):
raw_data = open(fileName, 'rb')
rawData = pandas.read_csv(raw_data, delimiter=" ")
return rawData.values
def getData(rawData):
#print "\n---- Getting data from File ----"
lineNum = rawData.shape[0]
colNum = rawData.shape[1]
data = np.array(rawData[0:lineNum, 0:colNum-1])
for i in range(lineNum):
classList.append(rawData[i][colNum - 1])
return [data, np.array(classList) ]
def getLabels(fileName):
labelData = load_data(dirPath + "/" + fileName)
labels = labelData[:,0].clip(min=0)
return np.array(labels)
def svm_intern_folds(data_train, data_test, labelsTrain, labelsTest):
acxmax = 0
c_max=0
gamma_max=0
for c in [2**(-5), 1, 2**(5), 2**(10)]:
for gamm in [2**(-15), 2**(-10), 2**(-5), 1, 2**5]:
svm = SVM.SVC(C = c, gamma = gamm)
svm.fit(data_train, labelsTrain)
accuracy = svm.score(data_test, labelsTest)
if accuracy > acxmax:
acxmax = accuracy
c_max = c
gamma_max = gamm
return [acxmax, c_max, gamma_max]
def | (matrix, percent):
print "\n---- PCA - Choose components number ----"
print "Variance :", percent
mat = np.matrix(matrix) * np.matrix(matrix).transpose()
U,S,V = np.linalg.svd(mat)
#print U.shape, S.shape, V.shape
s_sum_all = sum(S)
totalComponents = matrix.shape[1]
num = totalComponents
for i in range(totalComponents):
if sum(S[0:i]) / s_sum_all >= percent :
print "PCA dimension:",i ,"with variance =", sum(S[0:i]) / s_sum_all
num = i
break
return num
def applyPCA(data, numComponents):
pca = PCA(n_components=numComponents)
pcaData = pca.fit_transform(data)
return pcaData
def knn_intern_folds(data_train, data_test, labels_train, labels_test):
acxmax = 0
cores = 4
k_value = 0
for k in [1, 5, 11, 15, 21, 25]:
knn = KNeighborsClassifier(n_neighbors = k, n_jobs = cores)
knn.fit(data_train, labels_train)
accuracy = knn.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
k_value = k
return [acxmax, k]
def neural_intern_folds(data_train, data_test, labels_train, labels_test):
# 10, 20, 30 e 40 neuronios na camada escondida.
acxmax = 0
cores = 4
n_value = 0
for n in [10, 20, 30, 40]:
clf = MLPClassifier(hidden_layer_sizes=(n,), solver='lbfgs')
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
n_value = n
return [acxmax, n]
def rf_intern_folds(data_train, data_test, labels_train, labels_test):
# teste com mtry ou n_featrues = 10, 15, 20, 25 e ntrees = 100, 200, 300 e 400
acxmax = 0
n_feats = 0
n_trees = 0
for feat in [10, 15, 20, 25]:
for trees in [100, 200, 300, 400]:
clf = RandomForestClassifier (max_features = feat, n_estimators = trees)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_feats = feat
n_trees = trees
return [acxmax, n_feats, n_trees]
def gbm_intern_folds(data_train, data_test, labels_train, labels_test):
## numero de arvores = 30, 70, e 100, com learning rate de 0.1 e 0.05, e profundidade da arvore=5.
acxmax = 0
n_learn_rate = 0
n_trees = 0
depth_tree = 5
for trees in [30, 70, 100]:
for learn_rate in [0.1, 0.05]:
clf = GradientBoostingClassifier (n_estimators = trees, learning_rate = learn_rate, max_depth = depth_tree)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_trees = trees
n_learn_rate = learn_rate
return [acxmax, n_learn_rate, n_trees]
## Data preprocessing
def data_preprocess(fileName):
rawdata = load_data(dirPath + "/" + fileName)
## column mean
column_mean = np.nanmean(np.array(rawdata), axis=0)
## Nan values index
nan_indexes = np.where(np.isnan(rawdata))
## Replace Nan values
rawdata[nan_indexes] = np.take(column_mean, nan_indexes[1])
## Standarize each column individually
rawdata = (rawdata - np.mean(rawdata, axis=0)) / np.std(rawdata, axis=0)
rawdata = np.nan_to_num(rawdata)
return rawdata
def run_folds( alg, data, labels):
print "--- %s ---" % alg
final_accuracy = 0
params_final = [0.0, 0.0]
skf = StratifiedKFold(n_splits=5)
for train_index, test_index in skf.split(data, labels):
new_data_train = data[train_index]
new_data_test = data[test_index]
new_labels_train = labels[train_index]
new_labels_test = labels[test_index]
acx = 0
skf_intern = StratifiedKFold(n_splits=3)
for intern_train_index, intern_test_index in skf_intern.split(new_data_train, new_labels_train):
intern_data_train = new_data_train[intern_train_index]
intern_data_test = new_data_train[intern_test_index]
intern_labels_train = new_labels_train[intern_train_index]
intern_labels_test = new_labels_train[intern_test_index]
params = get_intern_folds (alg, intern_data_train, intern_data_test, intern_labels_train, intern_labels_test)
if params[0] > acx:
acx = params[0]
params_final[0] = params[1]
if len(params) > 2:
params_final[1] = params[2]
final_accuracy = final_accuracy + model_score(alg, params_final,
new_data_train,
new_labels_train,
new_data_test,
new_labels_test)
final_accuracy = final_accuracy / 5
print_results(alg, final_accuracy, params_final)
def model_score(alg, params, new_data_train, new_labels_train, new_data_test, new_labels_test):
if 'svm' == alg:
svm_model = SVM.SVC(C = params[0], gamma = params[1])
svm_model.fit(new_data_train, new_labels_train)
return svm_model.score(new_data_test, new_labels_test)
elif 'knn' == alg:
knn = KNeighborsClassifier(n_neighbors = params[0], n_jobs = 4)
knn.fit(new_data_train, new_labels_train)
return knn.score(new_data_test, new_labels_test)
elif 'neural' == alg:
clf = MLPClassifier(hidden_layer_sizes=(params[0],), solver='lbfgs')
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'rf' == alg:
clf = RandomForestClassifier (max_features = params[0], n_estimators = params[1])
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'gbm' == alg:
clf = GradientBoostingClassifier (learning_rate = params[0], n_estimators = params[1], max_depth = 5)
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
def get_intern_folds (alg, data_train, data_test, labels_train, labels_test):
if 'svm' == alg:
return svm_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'knn' == alg:
return knn_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'neural' == alg:
return neural_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'rf' == alg:
return rf_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'gbm' == alg:
return gbm_intern_folds(data_train, data_test, labels_train, labels_test)
def print_results(alg, final_accuracy, params):
if 'svm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final hiperparametros (C=%s, Gamma=%s)" % (params[0], params[1]) )
elif 'knn' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final K (K=%s)" % (params[0]))
elif 'neural' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Neurons=%s)" % (params[0]) )
elif 'rf' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Feats=%s, Trees=%s)" % (params[0], params[1]) )
elif 'gbm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Learn Rate=%s, Trees=%s)" % (params[0], params[1]))
def PCA_for_knn(data):
variance = 80
numComponents = chooseComponentsNumber(data, float(variance) / 100)
if numComponents == -1 : print "Invalid components number. Exit"; return
return applyPCA(data, numComponents)
def main(argv=None):
if argv is None:
arv = sys.argv
## Data pre-processing
data = data_preprocess(datFileName)
labels = getLabels(labelsFileName)
labels = np.array(list(labels[:data.shape[0]]))
## kNN , PCA com 80% da variancia
pcaData = PCA_for_knn(data)
run_folds('knn', pcaData, labels)
## SVM RBF
run_folds('svm', data, labels)
## Neural network
run_folds('neural', data, labels)
## RF
run_folds('rf', data, labels)
## GBM
run_folds('gbm', data, labels)
if __name__ == "__main__":
sys.exit(main())
| chooseComponentsNumber | identifier_name |
ml_ex_03.py | #!/usr/bin/python
import sys,os,csv
import pandas
import numpy as np
import math
from sklearn.model_selection import StratifiedKFold
from sklearn import svm as SVM
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
datFileName="secom.data"
labelsFileName="secom_labels.data"
dirPath=os.path.dirname(os.path.realpath(__file__))
classList=[]
data=[]
def load_data(fileName):
raw_data = open(fileName, 'rb')
rawData = pandas.read_csv(raw_data, delimiter=" ")
return rawData.values
def getData(rawData):
#print "\n---- Getting data from File ----"
lineNum = rawData.shape[0]
colNum = rawData.shape[1]
data = np.array(rawData[0:lineNum, 0:colNum-1])
for i in range(lineNum):
classList.append(rawData[i][colNum - 1])
return [data, np.array(classList) ]
def getLabels(fileName):
labelData = load_data(dirPath + "/" + fileName)
labels = labelData[:,0].clip(min=0)
return np.array(labels)
def svm_intern_folds(data_train, data_test, labelsTrain, labelsTest):
acxmax = 0
c_max=0
gamma_max=0
for c in [2**(-5), 1, 2**(5), 2**(10)]:
for gamm in [2**(-15), 2**(-10), 2**(-5), 1, 2**5]:
svm = SVM.SVC(C = c, gamma = gamm)
svm.fit(data_train, labelsTrain) | c_max = c
gamma_max = gamm
return [acxmax, c_max, gamma_max]
def chooseComponentsNumber(matrix, percent):
print "\n---- PCA - Choose components number ----"
print "Variance :", percent
mat = np.matrix(matrix) * np.matrix(matrix).transpose()
U,S,V = np.linalg.svd(mat)
#print U.shape, S.shape, V.shape
s_sum_all = sum(S)
totalComponents = matrix.shape[1]
num = totalComponents
for i in range(totalComponents):
if sum(S[0:i]) / s_sum_all >= percent :
print "PCA dimension:",i ,"with variance =", sum(S[0:i]) / s_sum_all
num = i
break
return num
def applyPCA(data, numComponents):
pca = PCA(n_components=numComponents)
pcaData = pca.fit_transform(data)
return pcaData
def knn_intern_folds(data_train, data_test, labels_train, labels_test):
acxmax = 0
cores = 4
k_value = 0
for k in [1, 5, 11, 15, 21, 25]:
knn = KNeighborsClassifier(n_neighbors = k, n_jobs = cores)
knn.fit(data_train, labels_train)
accuracy = knn.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
k_value = k
return [acxmax, k]
def neural_intern_folds(data_train, data_test, labels_train, labels_test):
# 10, 20, 30 e 40 neuronios na camada escondida.
acxmax = 0
cores = 4
n_value = 0
for n in [10, 20, 30, 40]:
clf = MLPClassifier(hidden_layer_sizes=(n,), solver='lbfgs')
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
n_value = n
return [acxmax, n]
def rf_intern_folds(data_train, data_test, labels_train, labels_test):
# teste com mtry ou n_featrues = 10, 15, 20, 25 e ntrees = 100, 200, 300 e 400
acxmax = 0
n_feats = 0
n_trees = 0
for feat in [10, 15, 20, 25]:
for trees in [100, 200, 300, 400]:
clf = RandomForestClassifier (max_features = feat, n_estimators = trees)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_feats = feat
n_trees = trees
return [acxmax, n_feats, n_trees]
def gbm_intern_folds(data_train, data_test, labels_train, labels_test):
## numero de arvores = 30, 70, e 100, com learning rate de 0.1 e 0.05, e profundidade da arvore=5.
acxmax = 0
n_learn_rate = 0
n_trees = 0
depth_tree = 5
for trees in [30, 70, 100]:
for learn_rate in [0.1, 0.05]:
clf = GradientBoostingClassifier (n_estimators = trees, learning_rate = learn_rate, max_depth = depth_tree)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_trees = trees
n_learn_rate = learn_rate
return [acxmax, n_learn_rate, n_trees]
## Data preprocessing
def data_preprocess(fileName):
rawdata = load_data(dirPath + "/" + fileName)
## column mean
column_mean = np.nanmean(np.array(rawdata), axis=0)
## Nan values index
nan_indexes = np.where(np.isnan(rawdata))
## Replace Nan values
rawdata[nan_indexes] = np.take(column_mean, nan_indexes[1])
## Standarize each column individually
rawdata = (rawdata - np.mean(rawdata, axis=0)) / np.std(rawdata, axis=0)
rawdata = np.nan_to_num(rawdata)
return rawdata
def run_folds( alg, data, labels):
print "--- %s ---" % alg
final_accuracy = 0
params_final = [0.0, 0.0]
skf = StratifiedKFold(n_splits=5)
for train_index, test_index in skf.split(data, labels):
new_data_train = data[train_index]
new_data_test = data[test_index]
new_labels_train = labels[train_index]
new_labels_test = labels[test_index]
acx = 0
skf_intern = StratifiedKFold(n_splits=3)
for intern_train_index, intern_test_index in skf_intern.split(new_data_train, new_labels_train):
intern_data_train = new_data_train[intern_train_index]
intern_data_test = new_data_train[intern_test_index]
intern_labels_train = new_labels_train[intern_train_index]
intern_labels_test = new_labels_train[intern_test_index]
params = get_intern_folds (alg, intern_data_train, intern_data_test, intern_labels_train, intern_labels_test)
if params[0] > acx:
acx = params[0]
params_final[0] = params[1]
if len(params) > 2:
params_final[1] = params[2]
final_accuracy = final_accuracy + model_score(alg, params_final,
new_data_train,
new_labels_train,
new_data_test,
new_labels_test)
final_accuracy = final_accuracy / 5
print_results(alg, final_accuracy, params_final)
def model_score(alg, params, new_data_train, new_labels_train, new_data_test, new_labels_test):
if 'svm' == alg:
svm_model = SVM.SVC(C = params[0], gamma = params[1])
svm_model.fit(new_data_train, new_labels_train)
return svm_model.score(new_data_test, new_labels_test)
elif 'knn' == alg:
knn = KNeighborsClassifier(n_neighbors = params[0], n_jobs = 4)
knn.fit(new_data_train, new_labels_train)
return knn.score(new_data_test, new_labels_test)
elif 'neural' == alg:
clf = MLPClassifier(hidden_layer_sizes=(params[0],), solver='lbfgs')
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'rf' == alg:
clf = RandomForestClassifier (max_features = params[0], n_estimators = params[1])
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'gbm' == alg:
clf = GradientBoostingClassifier (learning_rate = params[0], n_estimators = params[1], max_depth = 5)
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
def get_intern_folds (alg, data_train, data_test, labels_train, labels_test):
if 'svm' == alg:
return svm_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'knn' == alg:
return knn_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'neural' == alg:
return neural_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'rf' == alg:
return rf_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'gbm' == alg:
return gbm_intern_folds(data_train, data_test, labels_train, labels_test)
def print_results(alg, final_accuracy, params):
if 'svm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final hiperparametros (C=%s, Gamma=%s)" % (params[0], params[1]) )
elif 'knn' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final K (K=%s)" % (params[0]))
elif 'neural' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Neurons=%s)" % (params[0]) )
elif 'rf' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Feats=%s, Trees=%s)" % (params[0], params[1]) )
elif 'gbm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Learn Rate=%s, Trees=%s)" % (params[0], params[1]))
def PCA_for_knn(data):
variance = 80
numComponents = chooseComponentsNumber(data, float(variance) / 100)
if numComponents == -1 : print "Invalid components number. Exit"; return
return applyPCA(data, numComponents)
def main(argv=None):
if argv is None:
arv = sys.argv
## Data pre-processing
data = data_preprocess(datFileName)
labels = getLabels(labelsFileName)
labels = np.array(list(labels[:data.shape[0]]))
## kNN , PCA com 80% da variancia
pcaData = PCA_for_knn(data)
run_folds('knn', pcaData, labels)
## SVM RBF
run_folds('svm', data, labels)
## Neural network
run_folds('neural', data, labels)
## RF
run_folds('rf', data, labels)
## GBM
run_folds('gbm', data, labels)
if __name__ == "__main__":
sys.exit(main()) | accuracy = svm.score(data_test, labelsTest)
if accuracy > acxmax:
acxmax = accuracy | random_line_split |
ml_ex_03.py | #!/usr/bin/python
import sys,os,csv
import pandas
import numpy as np
import math
from sklearn.model_selection import StratifiedKFold
from sklearn import svm as SVM
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
datFileName="secom.data"
labelsFileName="secom_labels.data"
dirPath=os.path.dirname(os.path.realpath(__file__))
classList=[]
data=[]
def load_data(fileName):
raw_data = open(fileName, 'rb')
rawData = pandas.read_csv(raw_data, delimiter=" ")
return rawData.values
def getData(rawData):
#print "\n---- Getting data from File ----"
lineNum = rawData.shape[0]
colNum = rawData.shape[1]
data = np.array(rawData[0:lineNum, 0:colNum-1])
for i in range(lineNum):
classList.append(rawData[i][colNum - 1])
return [data, np.array(classList) ]
def getLabels(fileName):
labelData = load_data(dirPath + "/" + fileName)
labels = labelData[:,0].clip(min=0)
return np.array(labels)
def svm_intern_folds(data_train, data_test, labelsTrain, labelsTest):
acxmax = 0
c_max=0
gamma_max=0
for c in [2**(-5), 1, 2**(5), 2**(10)]:
for gamm in [2**(-15), 2**(-10), 2**(-5), 1, 2**5]:
svm = SVM.SVC(C = c, gamma = gamm)
svm.fit(data_train, labelsTrain)
accuracy = svm.score(data_test, labelsTest)
if accuracy > acxmax:
acxmax = accuracy
c_max = c
gamma_max = gamm
return [acxmax, c_max, gamma_max]
def chooseComponentsNumber(matrix, percent):
print "\n---- PCA - Choose components number ----"
print "Variance :", percent
mat = np.matrix(matrix) * np.matrix(matrix).transpose()
U,S,V = np.linalg.svd(mat)
#print U.shape, S.shape, V.shape
s_sum_all = sum(S)
totalComponents = matrix.shape[1]
num = totalComponents
for i in range(totalComponents):
if sum(S[0:i]) / s_sum_all >= percent :
print "PCA dimension:",i ,"with variance =", sum(S[0:i]) / s_sum_all
num = i
break
return num
def applyPCA(data, numComponents):
pca = PCA(n_components=numComponents)
pcaData = pca.fit_transform(data)
return pcaData
def knn_intern_folds(data_train, data_test, labels_train, labels_test):
acxmax = 0
cores = 4
k_value = 0
for k in [1, 5, 11, 15, 21, 25]:
knn = KNeighborsClassifier(n_neighbors = k, n_jobs = cores)
knn.fit(data_train, labels_train)
accuracy = knn.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
k_value = k
return [acxmax, k]
def neural_intern_folds(data_train, data_test, labels_train, labels_test):
# 10, 20, 30 e 40 neuronios na camada escondida.
acxmax = 0
cores = 4
n_value = 0
for n in [10, 20, 30, 40]:
clf = MLPClassifier(hidden_layer_sizes=(n,), solver='lbfgs')
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
if accuracy > acxmax:
acxmax = accuracy
n_value = n
return [acxmax, n]
def rf_intern_folds(data_train, data_test, labels_train, labels_test):
# teste com mtry ou n_featrues = 10, 15, 20, 25 e ntrees = 100, 200, 300 e 400
acxmax = 0
n_feats = 0
n_trees = 0
for feat in [10, 15, 20, 25]:
for trees in [100, 200, 300, 400]:
clf = RandomForestClassifier (max_features = feat, n_estimators = trees)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_feats = feat
n_trees = trees
return [acxmax, n_feats, n_trees]
def gbm_intern_folds(data_train, data_test, labels_train, labels_test):
## numero de arvores = 30, 70, e 100, com learning rate de 0.1 e 0.05, e profundidade da arvore=5.
acxmax = 0
n_learn_rate = 0
n_trees = 0
depth_tree = 5
for trees in [30, 70, 100]:
for learn_rate in [0.1, 0.05]:
clf = GradientBoostingClassifier (n_estimators = trees, learning_rate = learn_rate, max_depth = depth_tree)
clf.fit(data_train, labels_train)
accuracy = clf.score(data_test, labels_test)
#print "first acc:", accuracy
if accuracy > acxmax:
acxmax = accuracy
n_trees = trees
n_learn_rate = learn_rate
return [acxmax, n_learn_rate, n_trees]
## Data preprocessing
def data_preprocess(fileName):
rawdata = load_data(dirPath + "/" + fileName)
## column mean
column_mean = np.nanmean(np.array(rawdata), axis=0)
## Nan values index
nan_indexes = np.where(np.isnan(rawdata))
## Replace Nan values
rawdata[nan_indexes] = np.take(column_mean, nan_indexes[1])
## Standarize each column individually
rawdata = (rawdata - np.mean(rawdata, axis=0)) / np.std(rawdata, axis=0)
rawdata = np.nan_to_num(rawdata)
return rawdata
def run_folds( alg, data, labels):
print "--- %s ---" % alg
final_accuracy = 0
params_final = [0.0, 0.0]
skf = StratifiedKFold(n_splits=5)
for train_index, test_index in skf.split(data, labels):
new_data_train = data[train_index]
new_data_test = data[test_index]
new_labels_train = labels[train_index]
new_labels_test = labels[test_index]
acx = 0
skf_intern = StratifiedKFold(n_splits=3)
for intern_train_index, intern_test_index in skf_intern.split(new_data_train, new_labels_train):
intern_data_train = new_data_train[intern_train_index]
intern_data_test = new_data_train[intern_test_index]
intern_labels_train = new_labels_train[intern_train_index]
intern_labels_test = new_labels_train[intern_test_index]
params = get_intern_folds (alg, intern_data_train, intern_data_test, intern_labels_train, intern_labels_test)
if params[0] > acx:
|
final_accuracy = final_accuracy + model_score(alg, params_final,
new_data_train,
new_labels_train,
new_data_test,
new_labels_test)
final_accuracy = final_accuracy / 5
print_results(alg, final_accuracy, params_final)
def model_score(alg, params, new_data_train, new_labels_train, new_data_test, new_labels_test):
if 'svm' == alg:
svm_model = SVM.SVC(C = params[0], gamma = params[1])
svm_model.fit(new_data_train, new_labels_train)
return svm_model.score(new_data_test, new_labels_test)
elif 'knn' == alg:
knn = KNeighborsClassifier(n_neighbors = params[0], n_jobs = 4)
knn.fit(new_data_train, new_labels_train)
return knn.score(new_data_test, new_labels_test)
elif 'neural' == alg:
clf = MLPClassifier(hidden_layer_sizes=(params[0],), solver='lbfgs')
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'rf' == alg:
clf = RandomForestClassifier (max_features = params[0], n_estimators = params[1])
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
elif 'gbm' == alg:
clf = GradientBoostingClassifier (learning_rate = params[0], n_estimators = params[1], max_depth = 5)
clf.fit(new_data_train, new_labels_train)
return clf.score(new_data_test, new_labels_test)
def get_intern_folds (alg, data_train, data_test, labels_train, labels_test):
if 'svm' == alg:
return svm_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'knn' == alg:
return knn_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'neural' == alg:
return neural_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'rf' == alg:
return rf_intern_folds(data_train, data_test, labels_train, labels_test)
elif 'gbm' == alg:
return gbm_intern_folds(data_train, data_test, labels_train, labels_test)
def print_results(alg, final_accuracy, params):
if 'svm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final hiperparametros (C=%s, Gamma=%s)" % (params[0], params[1]) )
elif 'knn' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final K (K=%s)" % (params[0]))
elif 'neural' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Neurons=%s)" % (params[0]) )
elif 'rf' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Feats=%s, Trees=%s)" % (params[0], params[1]) )
elif 'gbm' == alg:
print("Acuracia:%s" % final_accuracy)
print("Valor final parametros (Learn Rate=%s, Trees=%s)" % (params[0], params[1]))
def PCA_for_knn(data):
variance = 80
numComponents = chooseComponentsNumber(data, float(variance) / 100)
if numComponents == -1 : print "Invalid components number. Exit"; return
return applyPCA(data, numComponents)
def main(argv=None):
if argv is None:
arv = sys.argv
## Data pre-processing
data = data_preprocess(datFileName)
labels = getLabels(labelsFileName)
labels = np.array(list(labels[:data.shape[0]]))
## kNN , PCA com 80% da variancia
pcaData = PCA_for_knn(data)
run_folds('knn', pcaData, labels)
## SVM RBF
run_folds('svm', data, labels)
## Neural network
run_folds('neural', data, labels)
## RF
run_folds('rf', data, labels)
## GBM
run_folds('gbm', data, labels)
if __name__ == "__main__":
sys.exit(main())
| acx = params[0]
params_final[0] = params[1]
if len(params) > 2:
params_final[1] = params[2] | conditional_block |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
// Commiting or Resolving fork if one exists
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
// Current block points to current head
// Check if block already claimed. Go on to
// compare duration then. Accept one of them
// and update it to be new chain head
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
// Check if the previous block is strictly in the
// cache. If so, look for common ancestor and resolve fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
} else {
info!("Not a valid fork.");
}
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
if chain_cc == fork_cc {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id); | // Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
fn delete_states_upto(
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone());
state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
}
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
} | // Mark all blocks upto common ancestor
// in the chain as invalid. | random_line_split |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
// Commiting or Resolving fork if one exists
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
// Current block points to current head
// Check if block already claimed. Go on to
// compare duration then. Accept one of them
// and update it to be new chain head
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
// Check if the previous block is strictly in the
// cache. If so, look for common ancestor and resolve fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
} else {
info!("Not a valid fork.");
}
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
if chain_cc == fork_cc | else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
// Mark all blocks upto common ancestor
// in the chain as invalid.
// Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
fn delete_states_upto(
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone());
state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
}
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
}
| {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} | conditional_block |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
// Commiting or Resolving fork if one exists
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
// Current block points to current head
// Check if block already claimed. Go on to
// compare duration then. Accept one of them
// and update it to be new chain head
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
// Check if the previous block is strictly in the
// cache. If so, look for common ancestor and resolve fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
} else {
info!("Not a valid fork.");
}
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
if chain_cc == fork_cc {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
// Mark all blocks upto common ancestor
// in the chain as invalid.
// Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
fn | (
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone());
state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
}
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
}
| delete_states_upto | identifier_name |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
// Commiting or Resolving fork if one exists
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
// Current block points to current head
// Check if block already claimed. Go on to
// compare duration then. Accept one of them
// and update it to be new chain head
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
// Check if the previous block is strictly in the
// cache. If so, look for common ancestor and resolve fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
} else {
info!("Not a valid fork.");
}
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
if chain_cc == fork_cc {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
// Mark all blocks upto common ancestor
// in the chain as invalid.
// Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
fn delete_states_upto(
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () |
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
}
| {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone());
state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
} | identifier_body |
roll.go | // A dice parser and roller library.
package main
import (
"errors"
"fmt"
"math/rand"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"github.com/bwmarrin/discordgo"
"github.com/rs/zerolog/log"
)
/******************
COMMAND HANDLER
******************/
func RollHandler(s *discordgo.Session, m *discordgo.MessageCreate, args []string) {
// If they used !roll, remove that from the args list. Otherwise they used ![expr]
if args[0] == "roll" {
args = args[1:]
}
// Convert the input string into a token stream
tokens, err := tokenizeExpr(strings.Join(args, ""))
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
return
}
// Convert the token stream into a a syntax tree
parser := NewDiceParser(tokens)
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
}
// Assemble the AST
expr := parser.Expr()
if len(parser.errors) != 0 {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Errs: %v\n", parser.errors))
}
// Walk and Resolve the AST
result, work := expr.Eval()
// Send a nice stylish message.
embed := &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{},
Color: 0x00ff00, // Green
Description: strings.Join(args, ""),
Fields: []*discordgo.MessageEmbedField{
{
Name: "Rolls",
Value: work,
Inline: false,
},
{
Name: "Result",
Value: strconv.Itoa(result),
Inline: false,
},
},
Timestamp: time.Now().Format(time.RFC3339), // Discord wants ISO8601; RFC3339 is an extension of ISO8601 and should be completely compatible.
Title: m.Author.Username + "#" + m.Author.Discriminator + " Rolled " + strconv.Itoa(result),
}
s.ChannelMessageSendEmbed(m.ChannelID, embed)
}
/******************
LEXER
******************/
func tokenizeExpr(raw string) ([]Token, error) {
var tokens []Token
var sb strings.Builder
for _, char := range raw {
// Consume until a transition token is reached
switch char {
case '\t', '\n', '\v', '\f', '\r', ' ', '\x85', '\xA0':
continue // Ignore whitespace.
case '+', '-', '*', '/', '(', ')':
// The previous token is over.
// Parse it before working on the current one.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
// Now narrow down the token type to one of the three.
var typ TokenType
switch char {
case '(', ')':
typ = Group
case '*', '/':
typ = Factor
case '+', '-':
typ = Term
default:
panic("Unreachable!")
}
// Append the operator token to the queue.
tokens = append(tokens, Token{typ, string(char)})
// Clear the token buffer for the next batch.
sb.Reset()
continue
default:
// This is a non-transition token.
sb.WriteRune(char)
}
}
// Parse any remaining characters in the buffer
// that may have not been terminated by an operator.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
return tokens, nil
}
type Token struct {
Type TokenType
Value string
}
type TokenType int
const (
Const TokenType = 0 // Number
Die TokenType = 1 // NdX i.e. 3d6
Term TokenType = 2 // +-
Factor TokenType = 3 // */
Group TokenType = 4 // ()
)
// Precompiled regular expression for matching on die expressions.
var dieExpr *regexp.Regexp = regexp.MustCompile(`^\d*d\d+$`)
// Parses either a die or value expression from a string.
// Returns an Error if the token is not valid.
func LexToken(token string) (Token, error) {
// Check for a Const Value Expr
if isInt(token) {
return Token{Type: Const, Value: token}, nil
}
// Check for a Die Value Expr.
if dieExpr.MatchString(token) {
if strings.HasPrefix(token, "d") {
// If the left hand of the expression is empty,
// that means it's an implied 1.
token = "1" + token
}
return Token{Type: Die, Value: token}, nil
}
return Token{}, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", token))
}
// Helper function for ParseToken. Checks if a string is only numbers.
func isInt(s string) bool {
for _, c := range s {
if !unicode.IsDigit(c) {
return false
}
}
return true
}
/******************
PARSER & AST
******************/
// A parser that converts a dice expression token stream to
// an AST and evaluates it according to the following grammar:
/*
Expr => Term
Term => Factor ([ '+' | '-' ]) Factor)*
Factor => Primary ([ '*' | '/' ] Primary)*
Primary => '(' Expr ')' | DIE | NUMBER
*/
type DiceParser struct {
tokens []Token
current int
errors []error
}
func NewDiceParser(tokens []Token) DiceParser {
return DiceParser{tokens, 0, make([]error, 0)}
}
// Satisfies the rule `Expr => Term`.
func (p *DiceParser) Expr() AstExpr {
return p.Term()
}
// Satisfies the rule for `Term => Factor ([ '+' | '-' ]) Factor)*`
func (p *DiceParser) Term() AstExpr {
var expr AstExpr = p.Factor() // Left
for p.check(Term) {
t := p.consume()
operator := t // A Token
right := p.Factor() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
}
// Satisfies the rule for `Factor => Primary ([ '*' | '/' ] Primary)*`
func (p *DiceParser) Factor() AstExpr |
// Satisfies the rule for `Primary => '(' Expr ')' | DIE | NUMBER`
func (p *DiceParser) Primary() AstExpr {
//log.Error().Str("Val", fmt.Sprintf("%v", p.peek())).Bool("Eq?", p.peek().Type == Const).Msg("Fuck")
// If the current token is a Constant value..
if p.check(Const) {
t := p.consume()
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
value, err := strconv.Atoi(t.Value)
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("Found a NUMBER token that was not purely numeric: '%s'", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstConst(value)
}
if p.check(Die) {
t := p.consume()
splitDie := strings.Split(t.Value, "d")
// A valid die expression is one with 2 parts, and the second part must be present and numeric.
if (len(splitDie) != 2) || (!isInt(splitDie[1])) {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", t.Value)))
return nil
}
// An empty first string indicates that the die is of the format `dXX`
// in which case there is an implied preceding 1.
if splitDie[0] == "" {
splitDie[0] = "1"
}
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
left, err := strconv.Atoi(splitDie[0])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
right, err := strconv.Atoi(splitDie[1])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstDie{left, right}
}
if p.check(Group) && p.peek().Value == "(" {
p.consume()
// In the case of a group, recurse back to the lowest priority and build a new subtree.
expr := p.Expr()
// Expect a closing paren.
if p.check(Group) && p.peek().Value == ")" {
p.consume()
return expr
} else {
// Error, unmatched Paren.
p.errors = append(p.errors, errors.New("Unmatched parenthesis."))
return nil
}
}
panic("Unreachable!")
}
// Consumes the current token if it matches the given type,
// advancing the cursor and returning it. Otherwise does nothing.
func (p *DiceParser) consume() Token {
if !p.isAtEnd() {
// Advance the cursor and return whatever was before it.
p.current += 1
return p.tokens[p.current-1]
}
// If we are at the end, then there's only one token left to consume.
return p.tokens[p.current]
}
// Returns whether the current token is of the
// given type. Does not consume.
func (p DiceParser) check(t TokenType) bool {
return p.peek().Type == t
}
// Get the current token without advancing nor consuming it.
func (p DiceParser) peek() Token {
return p.tokens[p.current]
}
// Returns whether the `current` field is equal to
// the length of the token buf - 1
func (p DiceParser) isAtEnd() bool {
return p.current == (len(p.tokens) - 1)
}
// An AST Expression is any object which can resolve itself
// to a final sum and a set of rolls (if any)
type AstExpr interface {
// Eval returns a result and a "steps string"
Eval() (int, string)
}
// A constant value's evaulation is just itself.
type AstConst int
func (c AstConst) Eval() (int, string) {
return int(c), strconv.Itoa(int(c))
}
// A die's value is rolled, 1-[right] rolled [left] times, then summed.
type AstDie struct {
left int
right int
}
func (t AstDie) Eval() (int, string) {
var sb strings.Builder
sb.WriteRune('[')
rand.Seed(time.Now().UnixNano())
rolls := make([]int, t.left)
for i := range rolls {
//out[i] = rand.Intn(max-min+1) + min
rolls[i] = rand.Intn(int(t.right)) + 1
sb.WriteString(strconv.Itoa(rolls[i]))
if i != (len(rolls) - 1) {
sb.WriteString(", ")
}
}
sb.WriteRune(']')
// Sum values
sum := 0
for _, v := range rolls {
sum += v
}
return sum, sb.String()
}
type AstOp struct {
Left AstExpr
Right AstExpr
Op Token
}
func (t AstOp) Eval() (int, string) {
left, lwork := t.Left.Eval()
right, rwork := t.Right.Eval()
var sum int = 0
var sb strings.Builder
sb.WriteString(lwork)
sb.WriteRune(' ')
sb.WriteString(t.Op.Value)
sb.WriteRune(' ')
sb.WriteString(rwork)
// If the lexer did its job, it should only be these discrete values.
switch t.Op.Value {
case "+":
sum = left + right
case "-":
sum = left - right
case "*":
sum = left * right
case "/":
if right == 0 {
return 0, "ERROR: DIVIDE BY ZERO"
} else {
sum = left / right
}
default:
panic("Unreachable! The Lexer failed to validate an Op Token!")
}
return sum, sb.String()
}
| {
expr := p.Primary()
for p.check(Factor) {
t := p.consume()
operator := t // A Token
right := p.Primary() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
} | identifier_body |
roll.go | // A dice parser and roller library.
package main
import (
"errors"
"fmt"
"math/rand"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"github.com/bwmarrin/discordgo"
"github.com/rs/zerolog/log"
)
/******************
COMMAND HANDLER
******************/
func RollHandler(s *discordgo.Session, m *discordgo.MessageCreate, args []string) {
// If they used !roll, remove that from the args list. Otherwise they used ![expr]
if args[0] == "roll" {
args = args[1:]
}
// Convert the input string into a token stream
tokens, err := tokenizeExpr(strings.Join(args, ""))
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
return
}
// Convert the token stream into a a syntax tree
parser := NewDiceParser(tokens)
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
}
// Assemble the AST
expr := parser.Expr()
if len(parser.errors) != 0 {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Errs: %v\n", parser.errors))
}
// Walk and Resolve the AST
result, work := expr.Eval()
// Send a nice stylish message.
embed := &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{},
Color: 0x00ff00, // Green
Description: strings.Join(args, ""),
Fields: []*discordgo.MessageEmbedField{
{
Name: "Rolls",
Value: work,
Inline: false,
},
{
Name: "Result",
Value: strconv.Itoa(result),
Inline: false,
},
},
Timestamp: time.Now().Format(time.RFC3339), // Discord wants ISO8601; RFC3339 is an extension of ISO8601 and should be completely compatible.
Title: m.Author.Username + "#" + m.Author.Discriminator + " Rolled " + strconv.Itoa(result),
}
s.ChannelMessageSendEmbed(m.ChannelID, embed)
}
/******************
LEXER
******************/
func tokenizeExpr(raw string) ([]Token, error) {
var tokens []Token
var sb strings.Builder
for _, char := range raw {
// Consume until a transition token is reached
switch char {
case '\t', '\n', '\v', '\f', '\r', ' ', '\x85', '\xA0':
continue // Ignore whitespace.
case '+', '-', '*', '/', '(', ')':
// The previous token is over.
// Parse it before working on the current one.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
// Now narrow down the token type to one of the three.
var typ TokenType
switch char {
case '(', ')':
typ = Group
case '*', '/':
typ = Factor
case '+', '-':
typ = Term
default:
panic("Unreachable!")
}
// Append the operator token to the queue.
tokens = append(tokens, Token{typ, string(char)})
// Clear the token buffer for the next batch.
sb.Reset()
continue
default:
// This is a non-transition token.
sb.WriteRune(char)
}
}
// Parse any remaining characters in the buffer
// that may have not been terminated by an operator.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
return tokens, nil
}
type Token struct {
Type TokenType
Value string
}
type TokenType int
const (
Const TokenType = 0 // Number
Die TokenType = 1 // NdX i.e. 3d6
Term TokenType = 2 // +-
Factor TokenType = 3 // */
Group TokenType = 4 // ()
)
// Precompiled regular expression for matching on die expressions.
var dieExpr *regexp.Regexp = regexp.MustCompile(`^\d*d\d+$`)
// Parses either a die or value expression from a string.
// Returns an Error if the token is not valid.
func LexToken(token string) (Token, error) {
// Check for a Const Value Expr
if isInt(token) {
return Token{Type: Const, Value: token}, nil
}
// Check for a Die Value Expr.
if dieExpr.MatchString(token) {
if strings.HasPrefix(token, "d") {
// If the left hand of the expression is empty,
// that means it's an implied 1.
token = "1" + token
}
return Token{Type: Die, Value: token}, nil
}
return Token{}, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", token))
}
// Helper function for ParseToken. Checks if a string is only numbers.
func isInt(s string) bool {
for _, c := range s {
if !unicode.IsDigit(c) {
return false
}
}
return true
}
/******************
PARSER & AST
******************/
// A parser that converts a dice expression token stream to
// an AST and evaluates it according to the following grammar:
/*
Expr => Term
Term => Factor ([ '+' | '-' ]) Factor)*
Factor => Primary ([ '*' | '/' ] Primary)*
Primary => '(' Expr ')' | DIE | NUMBER
*/
type DiceParser struct {
tokens []Token
current int
errors []error
}
func NewDiceParser(tokens []Token) DiceParser {
return DiceParser{tokens, 0, make([]error, 0)}
}
// Satisfies the rule `Expr => Term`.
func (p *DiceParser) Expr() AstExpr {
return p.Term()
}
// Satisfies the rule for `Term => Factor ([ '+' | '-' ]) Factor)*`
func (p *DiceParser) Term() AstExpr {
var expr AstExpr = p.Factor() // Left
for p.check(Term) {
t := p.consume()
operator := t // A Token
right := p.Factor() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
}
// Satisfies the rule for `Factor => Primary ([ '*' | '/' ] Primary)*`
func (p *DiceParser) Factor() AstExpr {
expr := p.Primary()
for p.check(Factor) {
t := p.consume()
operator := t // A Token
right := p.Primary() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
}
// Satisfies the rule for `Primary => '(' Expr ')' | DIE | NUMBER`
func (p *DiceParser) Primary() AstExpr {
//log.Error().Str("Val", fmt.Sprintf("%v", p.peek())).Bool("Eq?", p.peek().Type == Const).Msg("Fuck")
// If the current token is a Constant value..
if p.check(Const) {
t := p.consume()
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
value, err := strconv.Atoi(t.Value)
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("Found a NUMBER token that was not purely numeric: '%s'", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstConst(value)
}
if p.check(Die) {
t := p.consume()
splitDie := strings.Split(t.Value, "d")
// A valid die expression is one with 2 parts, and the second part must be present and numeric.
if (len(splitDie) != 2) || (!isInt(splitDie[1])) {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", t.Value)))
return nil
}
// An empty first string indicates that the die is of the format `dXX`
// in which case there is an implied preceding 1.
if splitDie[0] == "" {
splitDie[0] = "1"
}
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
left, err := strconv.Atoi(splitDie[0])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
right, err := strconv.Atoi(splitDie[1])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstDie{left, right}
}
if p.check(Group) && p.peek().Value == "(" {
p.consume()
// In the case of a group, recurse back to the lowest priority and build a new subtree.
expr := p.Expr()
// Expect a closing paren.
if p.check(Group) && p.peek().Value == ")" {
p.consume()
return expr
} else {
// Error, unmatched Paren.
p.errors = append(p.errors, errors.New("Unmatched parenthesis."))
return nil
}
}
panic("Unreachable!")
}
// Consumes the current token if it matches the given type,
// advancing the cursor and returning it. Otherwise does nothing.
func (p *DiceParser) consume() Token {
if !p.isAtEnd() {
// Advance the cursor and return whatever was before it.
p.current += 1
return p.tokens[p.current-1]
}
// If we are at the end, then there's only one token left to consume.
return p.tokens[p.current]
}
// Returns whether the current token is of the
// given type. Does not consume.
func (p DiceParser) check(t TokenType) bool {
return p.peek().Type == t
}
// Get the current token without advancing nor consuming it.
func (p DiceParser) peek() Token {
return p.tokens[p.current]
}
// Returns whether the `current` field is equal to
// the length of the token buf - 1
func (p DiceParser) isAtEnd() bool {
return p.current == (len(p.tokens) - 1)
}
// An AST Expression is any object which can resolve itself
// to a final sum and a set of rolls (if any)
type AstExpr interface {
// Eval returns a result and a "steps string"
Eval() (int, string)
}
// A constant value's evaulation is just itself.
type AstConst int
func (c AstConst) Eval() (int, string) { | left int
right int
}
func (t AstDie) Eval() (int, string) {
var sb strings.Builder
sb.WriteRune('[')
rand.Seed(time.Now().UnixNano())
rolls := make([]int, t.left)
for i := range rolls {
//out[i] = rand.Intn(max-min+1) + min
rolls[i] = rand.Intn(int(t.right)) + 1
sb.WriteString(strconv.Itoa(rolls[i]))
if i != (len(rolls) - 1) {
sb.WriteString(", ")
}
}
sb.WriteRune(']')
// Sum values
sum := 0
for _, v := range rolls {
sum += v
}
return sum, sb.String()
}
type AstOp struct {
Left AstExpr
Right AstExpr
Op Token
}
func (t AstOp) Eval() (int, string) {
left, lwork := t.Left.Eval()
right, rwork := t.Right.Eval()
var sum int = 0
var sb strings.Builder
sb.WriteString(lwork)
sb.WriteRune(' ')
sb.WriteString(t.Op.Value)
sb.WriteRune(' ')
sb.WriteString(rwork)
// If the lexer did its job, it should only be these discrete values.
switch t.Op.Value {
case "+":
sum = left + right
case "-":
sum = left - right
case "*":
sum = left * right
case "/":
if right == 0 {
return 0, "ERROR: DIVIDE BY ZERO"
} else {
sum = left / right
}
default:
panic("Unreachable! The Lexer failed to validate an Op Token!")
}
return sum, sb.String()
} | return int(c), strconv.Itoa(int(c))
}
// A die's value is rolled, 1-[right] rolled [left] times, then summed.
type AstDie struct { | random_line_split |
roll.go | // A dice parser and roller library.
package main
import (
"errors"
"fmt"
"math/rand"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"github.com/bwmarrin/discordgo"
"github.com/rs/zerolog/log"
)
/******************
COMMAND HANDLER
******************/
func RollHandler(s *discordgo.Session, m *discordgo.MessageCreate, args []string) {
// If they used !roll, remove that from the args list. Otherwise they used ![expr]
if args[0] == "roll" {
args = args[1:]
}
// Convert the input string into a token stream
tokens, err := tokenizeExpr(strings.Join(args, ""))
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
return
}
// Convert the token stream into a a syntax tree
parser := NewDiceParser(tokens)
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
}
// Assemble the AST
expr := parser.Expr()
if len(parser.errors) != 0 |
// Walk and Resolve the AST
result, work := expr.Eval()
// Send a nice stylish message.
embed := &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{},
Color: 0x00ff00, // Green
Description: strings.Join(args, ""),
Fields: []*discordgo.MessageEmbedField{
{
Name: "Rolls",
Value: work,
Inline: false,
},
{
Name: "Result",
Value: strconv.Itoa(result),
Inline: false,
},
},
Timestamp: time.Now().Format(time.RFC3339), // Discord wants ISO8601; RFC3339 is an extension of ISO8601 and should be completely compatible.
Title: m.Author.Username + "#" + m.Author.Discriminator + " Rolled " + strconv.Itoa(result),
}
s.ChannelMessageSendEmbed(m.ChannelID, embed)
}
/******************
LEXER
******************/
func tokenizeExpr(raw string) ([]Token, error) {
var tokens []Token
var sb strings.Builder
for _, char := range raw {
// Consume until a transition token is reached
switch char {
case '\t', '\n', '\v', '\f', '\r', ' ', '\x85', '\xA0':
continue // Ignore whitespace.
case '+', '-', '*', '/', '(', ')':
// The previous token is over.
// Parse it before working on the current one.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
// Now narrow down the token type to one of the three.
var typ TokenType
switch char {
case '(', ')':
typ = Group
case '*', '/':
typ = Factor
case '+', '-':
typ = Term
default:
panic("Unreachable!")
}
// Append the operator token to the queue.
tokens = append(tokens, Token{typ, string(char)})
// Clear the token buffer for the next batch.
sb.Reset()
continue
default:
// This is a non-transition token.
sb.WriteRune(char)
}
}
// Parse any remaining characters in the buffer
// that may have not been terminated by an operator.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
return tokens, nil
}
type Token struct {
Type TokenType
Value string
}
type TokenType int
const (
Const TokenType = 0 // Number
Die TokenType = 1 // NdX i.e. 3d6
Term TokenType = 2 // +-
Factor TokenType = 3 // */
Group TokenType = 4 // ()
)
// Precompiled regular expression for matching on die expressions.
var dieExpr *regexp.Regexp = regexp.MustCompile(`^\d*d\d+$`)
// Parses either a die or value expression from a string.
// Returns an Error if the token is not valid.
func LexToken(token string) (Token, error) {
// Check for a Const Value Expr
if isInt(token) {
return Token{Type: Const, Value: token}, nil
}
// Check for a Die Value Expr.
if dieExpr.MatchString(token) {
if strings.HasPrefix(token, "d") {
// If the left hand of the expression is empty,
// that means it's an implied 1.
token = "1" + token
}
return Token{Type: Die, Value: token}, nil
}
return Token{}, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", token))
}
// Helper function for ParseToken. Checks if a string is only numbers.
func isInt(s string) bool {
for _, c := range s {
if !unicode.IsDigit(c) {
return false
}
}
return true
}
/******************
PARSER & AST
******************/
// A parser that converts a dice expression token stream to
// an AST and evaluates it according to the following grammar:
/*
Expr => Term
Term => Factor ([ '+' | '-' ]) Factor)*
Factor => Primary ([ '*' | '/' ] Primary)*
Primary => '(' Expr ')' | DIE | NUMBER
*/
type DiceParser struct {
tokens []Token
current int
errors []error
}
func NewDiceParser(tokens []Token) DiceParser {
return DiceParser{tokens, 0, make([]error, 0)}
}
// Satisfies the rule `Expr => Term`.
func (p *DiceParser) Expr() AstExpr {
return p.Term()
}
// Satisfies the rule for `Term => Factor ([ '+' | '-' ]) Factor)*`
func (p *DiceParser) Term() AstExpr {
var expr AstExpr = p.Factor() // Left
for p.check(Term) {
t := p.consume()
operator := t // A Token
right := p.Factor() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
}
// Satisfies the rule for `Factor => Primary ([ '*' | '/' ] Primary)*`
func (p *DiceParser) Factor() AstExpr {
expr := p.Primary()
for p.check(Factor) {
t := p.consume()
operator := t // A Token
right := p.Primary() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
}
// Satisfies the rule for `Primary => '(' Expr ')' | DIE | NUMBER`
func (p *DiceParser) Primary() AstExpr {
//log.Error().Str("Val", fmt.Sprintf("%v", p.peek())).Bool("Eq?", p.peek().Type == Const).Msg("Fuck")
// If the current token is a Constant value..
if p.check(Const) {
t := p.consume()
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
value, err := strconv.Atoi(t.Value)
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("Found a NUMBER token that was not purely numeric: '%s'", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstConst(value)
}
if p.check(Die) {
t := p.consume()
splitDie := strings.Split(t.Value, "d")
// A valid die expression is one with 2 parts, and the second part must be present and numeric.
if (len(splitDie) != 2) || (!isInt(splitDie[1])) {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", t.Value)))
return nil
}
// An empty first string indicates that the die is of the format `dXX`
// in which case there is an implied preceding 1.
if splitDie[0] == "" {
splitDie[0] = "1"
}
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
left, err := strconv.Atoi(splitDie[0])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
right, err := strconv.Atoi(splitDie[1])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstDie{left, right}
}
if p.check(Group) && p.peek().Value == "(" {
p.consume()
// In the case of a group, recurse back to the lowest priority and build a new subtree.
expr := p.Expr()
// Expect a closing paren.
if p.check(Group) && p.peek().Value == ")" {
p.consume()
return expr
} else {
// Error, unmatched Paren.
p.errors = append(p.errors, errors.New("Unmatched parenthesis."))
return nil
}
}
panic("Unreachable!")
}
// Consumes the current token if it matches the given type,
// advancing the cursor and returning it. Otherwise does nothing.
func (p *DiceParser) consume() Token {
if !p.isAtEnd() {
// Advance the cursor and return whatever was before it.
p.current += 1
return p.tokens[p.current-1]
}
// If we are at the end, then there's only one token left to consume.
return p.tokens[p.current]
}
// Returns whether the current token is of the
// given type. Does not consume.
func (p DiceParser) check(t TokenType) bool {
return p.peek().Type == t
}
// Get the current token without advancing nor consuming it.
func (p DiceParser) peek() Token {
return p.tokens[p.current]
}
// Returns whether the `current` field is equal to
// the length of the token buf - 1
func (p DiceParser) isAtEnd() bool {
return p.current == (len(p.tokens) - 1)
}
// An AST Expression is any object which can resolve itself
// to a final sum and a set of rolls (if any)
type AstExpr interface {
// Eval returns a result and a "steps string"
Eval() (int, string)
}
// A constant value's evaulation is just itself.
type AstConst int
func (c AstConst) Eval() (int, string) {
return int(c), strconv.Itoa(int(c))
}
// A die's value is rolled, 1-[right] rolled [left] times, then summed.
type AstDie struct {
left int
right int
}
func (t AstDie) Eval() (int, string) {
var sb strings.Builder
sb.WriteRune('[')
rand.Seed(time.Now().UnixNano())
rolls := make([]int, t.left)
for i := range rolls {
//out[i] = rand.Intn(max-min+1) + min
rolls[i] = rand.Intn(int(t.right)) + 1
sb.WriteString(strconv.Itoa(rolls[i]))
if i != (len(rolls) - 1) {
sb.WriteString(", ")
}
}
sb.WriteRune(']')
// Sum values
sum := 0
for _, v := range rolls {
sum += v
}
return sum, sb.String()
}
type AstOp struct {
Left AstExpr
Right AstExpr
Op Token
}
func (t AstOp) Eval() (int, string) {
left, lwork := t.Left.Eval()
right, rwork := t.Right.Eval()
var sum int = 0
var sb strings.Builder
sb.WriteString(lwork)
sb.WriteRune(' ')
sb.WriteString(t.Op.Value)
sb.WriteRune(' ')
sb.WriteString(rwork)
// If the lexer did its job, it should only be these discrete values.
switch t.Op.Value {
case "+":
sum = left + right
case "-":
sum = left - right
case "*":
sum = left * right
case "/":
if right == 0 {
return 0, "ERROR: DIVIDE BY ZERO"
} else {
sum = left / right
}
default:
panic("Unreachable! The Lexer failed to validate an Op Token!")
}
return sum, sb.String()
}
| {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Errs: %v\n", parser.errors))
} | conditional_block |
roll.go | // A dice parser and roller library.
package main
import (
"errors"
"fmt"
"math/rand"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"github.com/bwmarrin/discordgo"
"github.com/rs/zerolog/log"
)
/******************
COMMAND HANDLER
******************/
func | (s *discordgo.Session, m *discordgo.MessageCreate, args []string) {
// If they used !roll, remove that from the args list. Otherwise they used ![expr]
if args[0] == "roll" {
args = args[1:]
}
// Convert the input string into a token stream
tokens, err := tokenizeExpr(strings.Join(args, ""))
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
return
}
// Convert the token stream into a a syntax tree
parser := NewDiceParser(tokens)
if err != nil {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Error: %s", err.Error()))
}
// Assemble the AST
expr := parser.Expr()
if len(parser.errors) != 0 {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Errs: %v\n", parser.errors))
}
// Walk and Resolve the AST
result, work := expr.Eval()
// Send a nice stylish message.
embed := &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{},
Color: 0x00ff00, // Green
Description: strings.Join(args, ""),
Fields: []*discordgo.MessageEmbedField{
{
Name: "Rolls",
Value: work,
Inline: false,
},
{
Name: "Result",
Value: strconv.Itoa(result),
Inline: false,
},
},
Timestamp: time.Now().Format(time.RFC3339), // Discord wants ISO8601; RFC3339 is an extension of ISO8601 and should be completely compatible.
Title: m.Author.Username + "#" + m.Author.Discriminator + " Rolled " + strconv.Itoa(result),
}
s.ChannelMessageSendEmbed(m.ChannelID, embed)
}
/******************
LEXER
******************/
func tokenizeExpr(raw string) ([]Token, error) {
var tokens []Token
var sb strings.Builder
for _, char := range raw {
// Consume until a transition token is reached
switch char {
case '\t', '\n', '\v', '\f', '\r', ' ', '\x85', '\xA0':
continue // Ignore whitespace.
case '+', '-', '*', '/', '(', ')':
// The previous token is over.
// Parse it before working on the current one.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
// Now narrow down the token type to one of the three.
var typ TokenType
switch char {
case '(', ')':
typ = Group
case '*', '/':
typ = Factor
case '+', '-':
typ = Term
default:
panic("Unreachable!")
}
// Append the operator token to the queue.
tokens = append(tokens, Token{typ, string(char)})
// Clear the token buffer for the next batch.
sb.Reset()
continue
default:
// This is a non-transition token.
sb.WriteRune(char)
}
}
// Parse any remaining characters in the buffer
// that may have not been terminated by an operator.
if sb.Len() != 0 {
t, err := LexToken(sb.String())
if err != nil {
return nil, err
}
tokens = append(tokens, t)
}
return tokens, nil
}
type Token struct {
Type TokenType
Value string
}
type TokenType int
const (
Const TokenType = 0 // Number
Die TokenType = 1 // NdX i.e. 3d6
Term TokenType = 2 // +-
Factor TokenType = 3 // */
Group TokenType = 4 // ()
)
// Precompiled regular expression for matching on die expressions.
var dieExpr *regexp.Regexp = regexp.MustCompile(`^\d*d\d+$`)
// Parses either a die or value expression from a string.
// Returns an Error if the token is not valid.
func LexToken(token string) (Token, error) {
// Check for a Const Value Expr
if isInt(token) {
return Token{Type: Const, Value: token}, nil
}
// Check for a Die Value Expr.
if dieExpr.MatchString(token) {
if strings.HasPrefix(token, "d") {
// If the left hand of the expression is empty,
// that means it's an implied 1.
token = "1" + token
}
return Token{Type: Die, Value: token}, nil
}
return Token{}, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", token))
}
// Helper function for ParseToken. Checks if a string is only numbers.
func isInt(s string) bool {
for _, c := range s {
if !unicode.IsDigit(c) {
return false
}
}
return true
}
/******************
PARSER & AST
******************/
// A parser that converts a dice expression token stream to
// an AST and evaluates it according to the following grammar:
/*
Expr => Term
Term => Factor ([ '+' | '-' ]) Factor)*
Factor => Primary ([ '*' | '/' ] Primary)*
Primary => '(' Expr ')' | DIE | NUMBER
*/
type DiceParser struct {
tokens []Token
current int
errors []error
}
func NewDiceParser(tokens []Token) DiceParser {
return DiceParser{tokens, 0, make([]error, 0)}
}
// Satisfies the rule `Expr => Term`.
func (p *DiceParser) Expr() AstExpr {
return p.Term()
}
// Satisfies the rule for `Term => Factor ([ '+' | '-' ]) Factor)*`
func (p *DiceParser) Term() AstExpr {
var expr AstExpr = p.Factor() // Left
for p.check(Term) {
t := p.consume()
operator := t // A Token
right := p.Factor() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
}
// Satisfies the rule for `Factor => Primary ([ '*' | '/' ] Primary)*`
func (p *DiceParser) Factor() AstExpr {
expr := p.Primary()
for p.check(Factor) {
t := p.consume()
operator := t // A Token
right := p.Primary() // An AstExpr
expr = AstOp{expr, right, operator}
}
return expr
}
// Satisfies the rule for `Primary => '(' Expr ')' | DIE | NUMBER`
func (p *DiceParser) Primary() AstExpr {
//log.Error().Str("Val", fmt.Sprintf("%v", p.peek())).Bool("Eq?", p.peek().Type == Const).Msg("Fuck")
// If the current token is a Constant value..
if p.check(Const) {
t := p.consume()
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
value, err := strconv.Atoi(t.Value)
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("Found a NUMBER token that was not purely numeric: '%s'", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstConst(value)
}
if p.check(Die) {
t := p.consume()
splitDie := strings.Split(t.Value, "d")
// A valid die expression is one with 2 parts, and the second part must be present and numeric.
if (len(splitDie) != 2) || (!isInt(splitDie[1])) {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" was not recognized as a valid number or dice expression.", t.Value)))
return nil
}
// An empty first string indicates that the die is of the format `dXX`
// in which case there is an implied preceding 1.
if splitDie[0] == "" {
splitDie[0] = "1"
}
// This should never fail because the tokenizer verifies that
// this kind of token is purely numeric.
left, err := strconv.Atoi(splitDie[0])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
right, err := strconv.Atoi(splitDie[1])
if err != nil {
p.errors = append(p.errors, errors.New(fmt.Sprintf("\"%s\" NUMBER in dice expression was not purely numeric.", t.Value)))
log.Error().Str("Value", t.Value).Str("Error", err.Error()).Msg("NUMBER token was not purely numeric! This should never happen!")
}
return AstDie{left, right}
}
if p.check(Group) && p.peek().Value == "(" {
p.consume()
// In the case of a group, recurse back to the lowest priority and build a new subtree.
expr := p.Expr()
// Expect a closing paren.
if p.check(Group) && p.peek().Value == ")" {
p.consume()
return expr
} else {
// Error, unmatched Paren.
p.errors = append(p.errors, errors.New("Unmatched parenthesis."))
return nil
}
}
panic("Unreachable!")
}
// Consumes the current token if it matches the given type,
// advancing the cursor and returning it. Otherwise does nothing.
func (p *DiceParser) consume() Token {
if !p.isAtEnd() {
// Advance the cursor and return whatever was before it.
p.current += 1
return p.tokens[p.current-1]
}
// If we are at the end, then there's only one token left to consume.
return p.tokens[p.current]
}
// Returns whether the current token is of the
// given type. Does not consume.
func (p DiceParser) check(t TokenType) bool {
return p.peek().Type == t
}
// Get the current token without advancing nor consuming it.
func (p DiceParser) peek() Token {
return p.tokens[p.current]
}
// Returns whether the `current` field is equal to
// the length of the token buf - 1
func (p DiceParser) isAtEnd() bool {
return p.current == (len(p.tokens) - 1)
}
// An AST Expression is any object which can resolve itself
// to a final sum and a set of rolls (if any)
type AstExpr interface {
// Eval returns a result and a "steps string"
Eval() (int, string)
}
// A constant value's evaulation is just itself.
type AstConst int
func (c AstConst) Eval() (int, string) {
return int(c), strconv.Itoa(int(c))
}
// A die's value is rolled, 1-[right] rolled [left] times, then summed.
type AstDie struct {
left int
right int
}
func (t AstDie) Eval() (int, string) {
var sb strings.Builder
sb.WriteRune('[')
rand.Seed(time.Now().UnixNano())
rolls := make([]int, t.left)
for i := range rolls {
//out[i] = rand.Intn(max-min+1) + min
rolls[i] = rand.Intn(int(t.right)) + 1
sb.WriteString(strconv.Itoa(rolls[i]))
if i != (len(rolls) - 1) {
sb.WriteString(", ")
}
}
sb.WriteRune(']')
// Sum values
sum := 0
for _, v := range rolls {
sum += v
}
return sum, sb.String()
}
type AstOp struct {
Left AstExpr
Right AstExpr
Op Token
}
func (t AstOp) Eval() (int, string) {
left, lwork := t.Left.Eval()
right, rwork := t.Right.Eval()
var sum int = 0
var sb strings.Builder
sb.WriteString(lwork)
sb.WriteRune(' ')
sb.WriteString(t.Op.Value)
sb.WriteRune(' ')
sb.WriteString(rwork)
// If the lexer did its job, it should only be these discrete values.
switch t.Op.Value {
case "+":
sum = left + right
case "-":
sum = left - right
case "*":
sum = left * right
case "/":
if right == 0 {
return 0, "ERROR: DIVIDE BY ZERO"
} else {
sum = left / right
}
default:
panic("Unreachable! The Lexer failed to validate an Op Token!")
}
return sum, sb.String()
}
| RollHandler | identifier_name |
OlMapView.js | OlMapView = function(){
this.map = null;
this.fromProjection = new OpenLayers.Projection("EPSG:4326");
this.toProjection = new OpenLayers.Projection("EPSG:900913");
this.baseLayer = null;
this.dotLayer = null;
this.contentlensManager = null;
//example
// this.strategy = null;
// this.clusters = null;
this.features = [];
this.tweetsHeatmapManager = null;
// histogramManager = null;
//polygon selection:
this.polygon_layer = null;
this.cachedCenter = [];
this.cachedZoom = null;
};
OlMapView.prototype.init = function(div) {
/* init map: */
var copyThis = this;
this.map = new OpenLayers.Map(div.id, {
projection: new OpenLayers.Projection("EPSG:900913"),
displayProjection: new OpenLayers.Projection("EPSG:4326")
});
//disable double clicking -> zooming feature;
var nav = new OpenLayers.Control.Navigation({
defaultDblClick: function(event) { return; }
});
this.map.addControl(nav);
//this.baseLayer = new OpenLayers.Layer.OSM("OSM base layer");
//grey-scale map;
this.baseLayer = new OpenLayers.Layer.OSM('Simple OSM Map', null, {
eventListeners: {
tileloaded: function(evt) {
var ctx = evt.tile.getCanvasContext();
if (ctx) {
var imgd = ctx.getImageData(0, 0, evt.tile.size.w, evt.tile.size.h);
var pix = imgd.data;
for (var i = 0, n = pix.length; i < n; i += 4) |
ctx.putImageData(imgd, 0, 0);
evt.tile.imgDiv.removeAttribute("crossorigin");
evt.tile.imgDiv.src = ctx.canvas.toDataURL();
}
}
}
});
this.map.addLayer(this.baseLayer);
// var style = new OpenLayers.Style({
// pointRadius: "${radius}",
// fillColor: "#ffcc66",
// fillOpacity: 0.8,
// strokeColor: "#cc6633",
// strokeWidth: "${width}",
// strokeOpacity: 0.8
// }, {
// context: {
// width: function(feature) {
// return (feature.cluster) ? 2 : 1;
// },
// radius: function(feature) {
// var pix = 2;
// if(feature.cluster) {
// pix = Math.min(feature.attributes.count, 7) + 2;
// }
// return pix;
// }
// }
// });
//start example
// this.strategy = new OpenLayers.Strategy.Cluster();
// this.strategy.distance = 100;
// this.strategy.threshold = 3;
// this.clusters = new OpenLayers.Layer.Vector("Clusters", {
// strategies: [this.strategy],
// styleMap: new OpenLayers.StyleMap({
// "default": style,
// "select": {
// fillColor: "#8aeeef",
// strokeColor: "#32a8a9"
// }
// })
// });
// var select = new OpenLayers.Control.SelectFeature(
// this.clusters, {hover: true}
// );
// this.map.addControl(select);
// select.activate();
// this.clusters.events.on({"featureselected": this.display});
//this.map.addLayers([this.baseLayer, this.clusters]);
var x = ( profile.min_x + profile.max_x ) * 0.5;
var y = ( profile.min_y + profile.max_y ) * 0.5;
// var x = profile.center[0];
// var y = profile.center[1];
var zoom = profile.zoom;
this.map.setCenter(
new OpenLayers.LonLat(x, y).transform(
new OpenLayers.Projection("EPSG:4326"),
copyThis.map.getProjectionObject()
), zoom
);
//this.tweetsHeatmapManager = new TweetsHeatmapManager();
//this.map.addLayer(this.tweetsHeatmapManager.getLayer());
/*
register events;
*/
this.map.addControl(new OpenLayers.Control.LayerSwitcher());
this.map.events.register("moveend", copyThis.map, function(e) {
$('[ng-controller="map_controller"]').scope().updateGeoBbox();
console.log("zoom level: "+copyThis.map.getZoom())
});
this.map.events.register("click", copyThis.map, function(e) {
if(!enableContentlens)
return;
copyThis.contentlensManager.addMultiContentlens();
console.log("zoom level: "+copyThis.map.getZoom())
});
this.map.events.register("mousemove", copyThis.map, function(e) {
if(!enableContentlens)
return;
var pixel = this.events.getMousePosition(e);
copyThis.contentlensManager.renderbyPixelCoordinates(pixel.x, pixel.y);
});
// this.map.events.register("rightclick", copyThis.map, function(e) {
// if(!enableContentlens)
// return;
// tweetsContentlensManager.deleteMulContentlens(e.xy.x, e.xy.y);
// });
// this.map.events.register("zoomend", copyThis.map, function(e) {
// $('[ng-controller="map_controller"]').scope().refresh_map(true);
// });
// this.map.events.register("mousemove", copyThis.map, function(e) {
// var pixel = this.events.getMousePosition(e);
// var lonlat = copyThis.map.getLonLatFromPixel( pixel );
// var lonlatTrans = lonlat.transform(copyThis.map.getProjectionObject(), copyThis.fromProjection);
// //for testing, only consider one lense;
// var lense_db = Canvas_manager.instance().topic_lense_manager.lense_db;
// if(lense_db.length > 0){
// var rst = lense_db[0].topic_lense_data.spatial_filter(lonlat.lon, lonlat.lat, 0.0001);
// console.log("spatial filtering: "+(rst.length>0?rst[0]:rst.length));
// }
// });
};
OlMapView.prototype.addDotLayer = function(){
this.dotLayer = new OpenLayers.Layer.Vector('TweetDotLayer',
{
styleMap: new OpenLayers.StyleMap({
pointRadius: "${radius}",
fillColor: "${color}",
fillOpacity: "${opacity}",
strokeOpacity: 0.5,
strokeWidth: 1,
strokeColor: '#777777'
})//,
//renderers: renderer
});
this.map.addLayer(this.dotLayer);
return this.dotLayer;
};
OlMapView.prototype.addContentlensLayer = function(){
var that = this;
this.contentlensManager = new TweetsContentlensManager(this.map);
this.map.addLayer(this.contentlensManager.getLayer());
$(that.contentlensManager.getLayer().div).css("pointer-events", "none");
}
OlMapView.prototype.toggleGlyphMode = function() {
Canvas_manager.instance().set_visibility(true);
this.dotLayer.setVisibility(false);
this.tweetsHeatmapManager.getLayer().setVisibility(false);
}
OlMapView.prototype.toggleHeatMapMode = function() {
this.tweetsHeatmapManager.getLayer().setVisibility(true);
this.dotLayer.setVisibility(false);
Canvas_manager.instance().set_visibility(false);
}
OlMapView.prototype.toggleDotMode = function() {
this.dotLayer.setVisibility(true);
this.tweetsHeatmapManager.getLayer().setVisibility(false);
Canvas_manager.instance().set_visibility(false);
}
OlMapView.prototype.toggleAllModes = function() {
this.dotLayer.setVisibility(true);
this.tweetsHeatmapManager.getLayer().setVisibility(true);
Canvas_manager.instance().set_visibility(true);
}
OlMapView.prototype.getMap = function(){
return this.map;
};
OlMapView.prototype.clear_dots = function(){
this.dotLayer.removeAllFeatures();
}
OlMapView.prototype.getFilteredArray = function(px, py){
var that = this;
var filterArray = [];
this.dotLayer.features.forEach(function(val){
var pixel = that.map.getPixelFromLonLat(new OpenLayers.LonLat(val.geometry.x, val.geometry.y));
pixel = [pixel.x, pixel.y];
if( (pixel[0]-px)*(pixel[0]-px) + (pixel[1]-py)*(pixel[1]-py) <= 30*30 ){
filterArray.push(val.data.keywords.join(" "));
val.attributes.color = "red";
val.attributes.opacity = 0.8;
}else{
val.attributes.color = "blue";
val.attributes.opacity = 0.5;
}
});
that.dotLayer.redraw();
console.log("contentlens: " + filterArray.length);
return filterArray;
}
OlMapView.prototype.render_dots = function(tweets, color, opac){
//this.dotLayer.removeAllFeatures();
var geo_arr = tweets.map(function(t){ return [t.lon, t.lat, t.keywords, t.tweet_id]; });
//this.dotLayer.removeAllFeatures();
var features_array = [];
for(var i = 0; i < geo_arr.length; i++) {
var point = new OpenLayers.Geometry.Point(geo_arr[i][0], geo_arr[i][1]).transform(this.fromProjection, this.toProjection);
// var pixelPoint = this.map.getPixelFromLonLat(new OpenLayers.LonLat(point.x, point.y));
var feature = new OpenLayers.Feature.Vector(point, {keywords:geo_arr[i][2], id:geo_arr[i][3]});
if(color == "blue")
feature.attributes = {color: color, opacity:opac, radius:2};
else
feature.attributes = {color: color, opacity:opac, radius:2};
features_array.push(feature);
}
//draw bounding box;
// if(Canvas_manager.instance().get_lense_manager().lense_db.length > 0){
// var geo_bbox = Canvas_manager.instance().get_lense_manager().lense_db[0].topic_lense_data.get_geo_bbox();
// var min_lng = geo_bbox.get_center().x - geo_bbox.get_extent().x;
// var max_lng = geo_bbox.get_center().x + geo_bbox.get_extent().x;
// var min_lat = geo_bbox.get_center().y - geo_bbox.get_extent().y;
// var max_lat = geo_bbox.get_center().y + geo_bbox.get_extent().y;
// pts = [[min_lng, min_lat],[min_lng, max_lat],[max_lng, min_lat],[max_lng, max_lat]];
// for(var i = 0; i < pts.length; i++) {
// var point = new OpenLayers.Geometry.Point(pts[i][0], pts[i][1]).transform(this.fromProjection, this.toProjection);
// var feature = new OpenLayers.Feature.Vector(point);
// feature.attributes = {color: "blue", opacity:1, radius:20};
// features_array.push(feature);
// }
// }
this.dotLayer.addFeatures(features_array);
};
//left, right, top, bottom;
OlMapView.prototype.getGeoBound = function(){
return this.map.getExtent().transform(this.toProjection,this.fromProjection);
};
OlMapView.prototype.getProjection = function(){
return {from:this.fromProjection,to:this.toProjection};
};
//calculate pixel coordinates for drawing dots, reduce overlapping;
OlMapView.prototype.render_heatmap = function(){
return;
//var bound = this.getGeoBound();
//var geo_arr = TweetsDataManager.instance().filter_by_geo_bound(bound.bottom, bound.top, bound.left, bound.right).select("tweet_id", "lat", "lng");
var geo_arr = Canvas_manager.instance().get_lense_manager().get_geo_points();
console.log("# of points for heat map: " + geo_arr.length);
this.features = [];
var geo_points = [];
geo_arr.forEach(function(entry){
geo_points.push({lat:entry[1], lng:entry[2], id:entry[0]});
});
//var gPoint = new OpenLayers.Geometry.Point(this.geoBuffer[id].lng, this.geoBuffer[id].lat).transform(this.getProjection().from, this.getProjection().to);
this.tweetsHeatmapManager.refreshMap(geo_points);
};
OlMapView.prototype.cacheLocation = function() {
this.cachedCenter = new OpenLayers.LonLat(this.map.center.lon, this.map.center.lat).transform(this.toProjection, this.fromProjection);
this.cachedZoom = this.map.zoom;
};
OlMapView.prototype.moveToCacheLocation = function(){
var lon = this.cachedCenter.lon;
var lat = this.cachedCenter.lat;
var zoom = this.cachedZoom;
this.map.panTo(new OpenLayers.LonLat(lon, lat).transform(this.fromProjection, this.toProjection));
this.map.zoomTo(zoom);
};
OlMapView.prototype.moveTo = function(lon, lat, zoom){
this.map.panTo(new OpenLayers.LonLat(lon, lat).transform(this.fromProjection, this.toProjection));
this.map.zoomTo(zoom);
};
// OlMapView.prototype.reset = function() {
// // console.log(distance);
// this.strategy.distance = 50;
// // this.strategy.threshold = 1;
// this.clusters.removeFeatures(this.clusters.features);
// this.clusters.addFeatures(this.features);
// };
// OlMapView.prototype.display = function(event) {
// var f = event.feature;
// var el = document.getElementById("output");
// if(f.cluster) {
// el.innerHTML = "cluster of " + f.attributes.count;
// } else {
// el.innerHTML = "unclustered " + f.geometry;
// }
// };
OlMapView.prototype.addLayer = function(layer) {
this.map.addLayer(layer);
};
OlMapView.prototype.getMapExtent = function() {
return this.map.getExtent();
};
OlMapView.zoomLevel = profile.zoom;
OlMapView.INTERACTION = {ZOOM_IN:0, ZOOM_OUT:1, PAN:2};
| {
var tmp = (3 * pix[i] + 4 * pix[i + 1] + pix[i + 2]) / 8;
pix[i] = pix[i + 1] = pix[i + 2] = Math.sqrt( tmp / 256.0 ) * 256 * 1.05;
} | conditional_block |
OlMapView.js | OlMapView = function(){
| this.dotLayer = null;
this.contentlensManager = null;
//example
// this.strategy = null;
// this.clusters = null;
this.features = [];
this.tweetsHeatmapManager = null;
// histogramManager = null;
//polygon selection:
this.polygon_layer = null;
this.cachedCenter = [];
this.cachedZoom = null;
};
OlMapView.prototype.init = function(div) {
/* init map: */
var copyThis = this;
this.map = new OpenLayers.Map(div.id, {
projection: new OpenLayers.Projection("EPSG:900913"),
displayProjection: new OpenLayers.Projection("EPSG:4326")
});
//disable double clicking -> zooming feature;
var nav = new OpenLayers.Control.Navigation({
defaultDblClick: function(event) { return; }
});
this.map.addControl(nav);
//this.baseLayer = new OpenLayers.Layer.OSM("OSM base layer");
//grey-scale map;
this.baseLayer = new OpenLayers.Layer.OSM('Simple OSM Map', null, {
eventListeners: {
tileloaded: function(evt) {
var ctx = evt.tile.getCanvasContext();
if (ctx) {
var imgd = ctx.getImageData(0, 0, evt.tile.size.w, evt.tile.size.h);
var pix = imgd.data;
for (var i = 0, n = pix.length; i < n; i += 4) {
var tmp = (3 * pix[i] + 4 * pix[i + 1] + pix[i + 2]) / 8;
pix[i] = pix[i + 1] = pix[i + 2] = Math.sqrt( tmp / 256.0 ) * 256 * 1.05;
}
ctx.putImageData(imgd, 0, 0);
evt.tile.imgDiv.removeAttribute("crossorigin");
evt.tile.imgDiv.src = ctx.canvas.toDataURL();
}
}
}
});
this.map.addLayer(this.baseLayer);
// var style = new OpenLayers.Style({
// pointRadius: "${radius}",
// fillColor: "#ffcc66",
// fillOpacity: 0.8,
// strokeColor: "#cc6633",
// strokeWidth: "${width}",
// strokeOpacity: 0.8
// }, {
// context: {
// width: function(feature) {
// return (feature.cluster) ? 2 : 1;
// },
// radius: function(feature) {
// var pix = 2;
// if(feature.cluster) {
// pix = Math.min(feature.attributes.count, 7) + 2;
// }
// return pix;
// }
// }
// });
//start example
// this.strategy = new OpenLayers.Strategy.Cluster();
// this.strategy.distance = 100;
// this.strategy.threshold = 3;
// this.clusters = new OpenLayers.Layer.Vector("Clusters", {
// strategies: [this.strategy],
// styleMap: new OpenLayers.StyleMap({
// "default": style,
// "select": {
// fillColor: "#8aeeef",
// strokeColor: "#32a8a9"
// }
// })
// });
// var select = new OpenLayers.Control.SelectFeature(
// this.clusters, {hover: true}
// );
// this.map.addControl(select);
// select.activate();
// this.clusters.events.on({"featureselected": this.display});
//this.map.addLayers([this.baseLayer, this.clusters]);
var x = ( profile.min_x + profile.max_x ) * 0.5;
var y = ( profile.min_y + profile.max_y ) * 0.5;
// var x = profile.center[0];
// var y = profile.center[1];
var zoom = profile.zoom;
this.map.setCenter(
new OpenLayers.LonLat(x, y).transform(
new OpenLayers.Projection("EPSG:4326"),
copyThis.map.getProjectionObject()
), zoom
);
//this.tweetsHeatmapManager = new TweetsHeatmapManager();
//this.map.addLayer(this.tweetsHeatmapManager.getLayer());
/*
register events;
*/
this.map.addControl(new OpenLayers.Control.LayerSwitcher());
this.map.events.register("moveend", copyThis.map, function(e) {
$('[ng-controller="map_controller"]').scope().updateGeoBbox();
console.log("zoom level: "+copyThis.map.getZoom())
});
this.map.events.register("click", copyThis.map, function(e) {
if(!enableContentlens)
return;
copyThis.contentlensManager.addMultiContentlens();
console.log("zoom level: "+copyThis.map.getZoom())
});
this.map.events.register("mousemove", copyThis.map, function(e) {
if(!enableContentlens)
return;
var pixel = this.events.getMousePosition(e);
copyThis.contentlensManager.renderbyPixelCoordinates(pixel.x, pixel.y);
});
// this.map.events.register("rightclick", copyThis.map, function(e) {
// if(!enableContentlens)
// return;
// tweetsContentlensManager.deleteMulContentlens(e.xy.x, e.xy.y);
// });
// this.map.events.register("zoomend", copyThis.map, function(e) {
// $('[ng-controller="map_controller"]').scope().refresh_map(true);
// });
// this.map.events.register("mousemove", copyThis.map, function(e) {
// var pixel = this.events.getMousePosition(e);
// var lonlat = copyThis.map.getLonLatFromPixel( pixel );
// var lonlatTrans = lonlat.transform(copyThis.map.getProjectionObject(), copyThis.fromProjection);
// //for testing, only consider one lense;
// var lense_db = Canvas_manager.instance().topic_lense_manager.lense_db;
// if(lense_db.length > 0){
// var rst = lense_db[0].topic_lense_data.spatial_filter(lonlat.lon, lonlat.lat, 0.0001);
// console.log("spatial filtering: "+(rst.length>0?rst[0]:rst.length));
// }
// });
};
OlMapView.prototype.addDotLayer = function(){
this.dotLayer = new OpenLayers.Layer.Vector('TweetDotLayer',
{
styleMap: new OpenLayers.StyleMap({
pointRadius: "${radius}",
fillColor: "${color}",
fillOpacity: "${opacity}",
strokeOpacity: 0.5,
strokeWidth: 1,
strokeColor: '#777777'
})//,
//renderers: renderer
});
this.map.addLayer(this.dotLayer);
return this.dotLayer;
};
OlMapView.prototype.addContentlensLayer = function(){
var that = this;
this.contentlensManager = new TweetsContentlensManager(this.map);
this.map.addLayer(this.contentlensManager.getLayer());
$(that.contentlensManager.getLayer().div).css("pointer-events", "none");
}
OlMapView.prototype.toggleGlyphMode = function() {
Canvas_manager.instance().set_visibility(true);
this.dotLayer.setVisibility(false);
this.tweetsHeatmapManager.getLayer().setVisibility(false);
}
OlMapView.prototype.toggleHeatMapMode = function() {
this.tweetsHeatmapManager.getLayer().setVisibility(true);
this.dotLayer.setVisibility(false);
Canvas_manager.instance().set_visibility(false);
}
OlMapView.prototype.toggleDotMode = function() {
this.dotLayer.setVisibility(true);
this.tweetsHeatmapManager.getLayer().setVisibility(false);
Canvas_manager.instance().set_visibility(false);
}
OlMapView.prototype.toggleAllModes = function() {
this.dotLayer.setVisibility(true);
this.tweetsHeatmapManager.getLayer().setVisibility(true);
Canvas_manager.instance().set_visibility(true);
}
OlMapView.prototype.getMap = function(){
return this.map;
};
OlMapView.prototype.clear_dots = function(){
this.dotLayer.removeAllFeatures();
}
OlMapView.prototype.getFilteredArray = function(px, py){
var that = this;
var filterArray = [];
this.dotLayer.features.forEach(function(val){
var pixel = that.map.getPixelFromLonLat(new OpenLayers.LonLat(val.geometry.x, val.geometry.y));
pixel = [pixel.x, pixel.y];
if( (pixel[0]-px)*(pixel[0]-px) + (pixel[1]-py)*(pixel[1]-py) <= 30*30 ){
filterArray.push(val.data.keywords.join(" "));
val.attributes.color = "red";
val.attributes.opacity = 0.8;
}else{
val.attributes.color = "blue";
val.attributes.opacity = 0.5;
}
});
that.dotLayer.redraw();
console.log("contentlens: " + filterArray.length);
return filterArray;
}
OlMapView.prototype.render_dots = function(tweets, color, opac){
//this.dotLayer.removeAllFeatures();
var geo_arr = tweets.map(function(t){ return [t.lon, t.lat, t.keywords, t.tweet_id]; });
//this.dotLayer.removeAllFeatures();
var features_array = [];
for(var i = 0; i < geo_arr.length; i++) {
var point = new OpenLayers.Geometry.Point(geo_arr[i][0], geo_arr[i][1]).transform(this.fromProjection, this.toProjection);
// var pixelPoint = this.map.getPixelFromLonLat(new OpenLayers.LonLat(point.x, point.y));
var feature = new OpenLayers.Feature.Vector(point, {keywords:geo_arr[i][2], id:geo_arr[i][3]});
if(color == "blue")
feature.attributes = {color: color, opacity:opac, radius:2};
else
feature.attributes = {color: color, opacity:opac, radius:2};
features_array.push(feature);
}
//draw bounding box;
// if(Canvas_manager.instance().get_lense_manager().lense_db.length > 0){
// var geo_bbox = Canvas_manager.instance().get_lense_manager().lense_db[0].topic_lense_data.get_geo_bbox();
// var min_lng = geo_bbox.get_center().x - geo_bbox.get_extent().x;
// var max_lng = geo_bbox.get_center().x + geo_bbox.get_extent().x;
// var min_lat = geo_bbox.get_center().y - geo_bbox.get_extent().y;
// var max_lat = geo_bbox.get_center().y + geo_bbox.get_extent().y;
// pts = [[min_lng, min_lat],[min_lng, max_lat],[max_lng, min_lat],[max_lng, max_lat]];
// for(var i = 0; i < pts.length; i++) {
// var point = new OpenLayers.Geometry.Point(pts[i][0], pts[i][1]).transform(this.fromProjection, this.toProjection);
// var feature = new OpenLayers.Feature.Vector(point);
// feature.attributes = {color: "blue", opacity:1, radius:20};
// features_array.push(feature);
// }
// }
this.dotLayer.addFeatures(features_array);
};
//left, right, top, bottom;
OlMapView.prototype.getGeoBound = function(){
return this.map.getExtent().transform(this.toProjection,this.fromProjection);
};
OlMapView.prototype.getProjection = function(){
return {from:this.fromProjection,to:this.toProjection};
};
//calculate pixel coordinates for drawing dots, reduce overlapping;
OlMapView.prototype.render_heatmap = function(){
return;
//var bound = this.getGeoBound();
//var geo_arr = TweetsDataManager.instance().filter_by_geo_bound(bound.bottom, bound.top, bound.left, bound.right).select("tweet_id", "lat", "lng");
var geo_arr = Canvas_manager.instance().get_lense_manager().get_geo_points();
console.log("# of points for heat map: " + geo_arr.length);
this.features = [];
var geo_points = [];
geo_arr.forEach(function(entry){
geo_points.push({lat:entry[1], lng:entry[2], id:entry[0]});
});
//var gPoint = new OpenLayers.Geometry.Point(this.geoBuffer[id].lng, this.geoBuffer[id].lat).transform(this.getProjection().from, this.getProjection().to);
this.tweetsHeatmapManager.refreshMap(geo_points);
};
OlMapView.prototype.cacheLocation = function() {
this.cachedCenter = new OpenLayers.LonLat(this.map.center.lon, this.map.center.lat).transform(this.toProjection, this.fromProjection);
this.cachedZoom = this.map.zoom;
};
OlMapView.prototype.moveToCacheLocation = function(){
var lon = this.cachedCenter.lon;
var lat = this.cachedCenter.lat;
var zoom = this.cachedZoom;
this.map.panTo(new OpenLayers.LonLat(lon, lat).transform(this.fromProjection, this.toProjection));
this.map.zoomTo(zoom);
};
OlMapView.prototype.moveTo = function(lon, lat, zoom){
this.map.panTo(new OpenLayers.LonLat(lon, lat).transform(this.fromProjection, this.toProjection));
this.map.zoomTo(zoom);
};
// OlMapView.prototype.reset = function() {
// // console.log(distance);
// this.strategy.distance = 50;
// // this.strategy.threshold = 1;
// this.clusters.removeFeatures(this.clusters.features);
// this.clusters.addFeatures(this.features);
// };
// OlMapView.prototype.display = function(event) {
// var f = event.feature;
// var el = document.getElementById("output");
// if(f.cluster) {
// el.innerHTML = "cluster of " + f.attributes.count;
// } else {
// el.innerHTML = "unclustered " + f.geometry;
// }
// };
OlMapView.prototype.addLayer = function(layer) {
this.map.addLayer(layer);
};
OlMapView.prototype.getMapExtent = function() {
return this.map.getExtent();
};
OlMapView.zoomLevel = profile.zoom;
OlMapView.INTERACTION = {ZOOM_IN:0, ZOOM_OUT:1, PAN:2}; | this.map = null;
this.fromProjection = new OpenLayers.Projection("EPSG:4326");
this.toProjection = new OpenLayers.Projection("EPSG:900913");
this.baseLayer = null; | random_line_split |
actions.rs | use crate::{
dkg_contract::{DKG as DKGContract, DKG_ABI},
opts::*,
};
use rand::{CryptoRng, RngCore};
use std::{fs::File, io::Write, sync::Arc};
use dkg_core::{
primitives::{joint_feldman::*, resharing::RDKG, *},
DKGPhase, Phase2Result,
};
use anyhow::Result;
use ethers::prelude::*;
use ethers::providers::Middleware;
use ethers::signers::LocalWallet;
use rustc_hex::{FromHex, ToHex};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use threshold_bls::{group::Curve, sig::Scheme};
use threshold_bls::{
poly::{Idx, PublicPoly},
sig::Share,
};
#[derive(Serialize, Deserialize, Debug)]
struct CeloKeypairJson {
address: Address,
#[serde(rename = "privateKey")]
private_key: String,
}
pub fn keygen<R>(opts: KeygenOpts, rng: &mut R) -> Result<()>
where
R: CryptoRng + RngCore,
{
let wallet = Wallet::new(rng);
let output = CeloKeypairJson {
private_key: hex::encode(&wallet.signer().to_bytes()),
address: wallet.address(),
};
if let Some(path) = opts.path {
let f = File::create(path)?;
serde_json::to_writer(&f, &output)?;
} else {
serde_json::to_writer(std::io::stdout(), &output)?;
}
Ok(())
}
pub async fn deploy(opts: DeployOpts) -> Result<()> {
// hard-code the contract's bytecode when deploying
let bytecode = include_str!["../dkg.bin"];
let bytecode = bytecode.from_hex::<Vec<u8>>()?;
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let abi = DKG_ABI.clone();
let factory = ContractFactory::new(abi, Bytes::from(bytecode), client);
let contract = factory
.deploy((opts.threshold as u64, opts.phase_duration as u64))?
.send()
.await?;
println!("Contract deployed at: {:?}", contract.address());
Ok(())
}
pub async fn allow(opts: AllowlistOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
for addr in opts.address {
let tx = contract.allowlist(addr).block(BlockNumber::Pending);
let tx = tx.send().await?.await?;
println!("Sent `allow` tx for {:?} (hash: {:?})", addr, tx);
}
Ok(())
}
pub async fn start(opts: StartOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
// Submit the tx and wait for the confirmation
let _tx_hash = contract.start().send().await?.await?;
Ok(())
}
pub async fn reshare<S, M, C, R>(opts: ReshareConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
M: Middleware,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
// we need the previous group and public poly for resharing
let previous_group = {
let previous_dkg = DKGContract::new(opts.previous_contract_address, client.clone());
let previous_group = previous_dkg.get_bls_keys().call().await?;
pubkeys_to_group::<C>(previous_group)?
};
let public_poly = opts.public_polynomial.from_hex::<Vec<u8>>()?;
let public_poly: PublicPoly<C> = bincode::deserialize(&public_poly)?;
let dkg = DKGContract::new(opts.contract_address, client.clone());
let (private_key, public_key) = S::keypair(rng);
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
let new_group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = if let Some(share) = opts.share {
let share = share.from_hex::<Vec<u8>>()?;
let share: Share<C::Scalar> = bincode::deserialize(&share)?;
let dkg_output = DKGOutput {
share,
qual: previous_group,
public: public_poly,
};
RDKG::new_from_share(private_key, dkg_output, new_group)
} else {
RDKG::new_member(private_key, previous_group, public_poly, new_group)
}?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
pub async fn run<S, C, R>(opts: DKGConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let dkg = DKGContract::new(opts.contract_address, client);
// 1. Generate the keys
let (private_key, public_key) = S::keypair(rng);
// 2. Register
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
// Get the group info
let group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = DKG::new(private_key, group)?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
async fn register<S: Scheme, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
public_key: &S::Public,
) -> Result<()> {
println!("Registering...");
let public_key_serialized = bincode::serialize(public_key)?;
let public_key_bytes = ethers::prelude::Bytes::from(public_key_serialized);
let _pending_tx = dkg.register(public_key_bytes).send().await?.await?;
// Wait for Phase 1
wait_for_phase(dkg, 1).await?;
Ok(())
}
async fn get_group<C: Curve, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
) -> Result<Group<C>> {
let group = dkg.get_bls_keys().call().await?;
let participants = dkg.get_participants().call().await?;
confirm_group(&group, participants)?;
let group = pubkeys_to_group::<C>(group)?;
Ok(group)
}
fn confirm_group(
pubkeys: &(U256, Vec<ethers::prelude::Bytes>),
participants: Vec<Address>,
) -> Result<()> {
// print some debug info
println!(
"Will run DKG with the group listed below and threshold {}",
pubkeys.0
);
for (bls_pubkey, address) in pubkeys.1.iter().zip(&participants) {
let key = bls_pubkey.to_vec().to_hex::<String>();
println!("{:?} -> {}", address, key)
}
if !clt::confirm(
"\nDoes the above group look good to you?",
false,
"\n",
true,
) {
return Err(anyhow::anyhow!("User rejected group choice."));
}
Ok(())
}
// Pass the result of `get_bls_keys` to convert the raw data to a group
fn pubkeys_to_group<C: Curve>(pubkeys: (U256, Vec<ethers::prelude::Bytes>)) -> Result<Group<C>> {
let nodes = pubkeys
.1
.into_iter()
.filter(|pubkey| !pubkey.to_vec().is_empty()) // skip users that did not register
.enumerate()
.map(|(i, pubkey)| {
let pubkey: C::Point = bincode::deserialize(&pubkey.to_vec()[..])?;
Ok(Node::<C>::new(i as Idx, pubkey))
})
.collect::<Result<_>>()?;
Ok(Group {
threshold: pubkeys.0.as_u64() as usize,
nodes,
})
}
// Shared helper for running the DKG in both normal and re-sharing mode
async fn run_dkg<P, C, R, M: Middleware + 'static>(
mut dkg: DKGContract<M>,
phase0: P,
rng: &mut R,
output_path: Option<String>,
) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
// S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
P: Phase0<C>,
R: RngCore,
{
// Run Phase 1 and publish to the chain
println!("Calculating and broadcasting our shares...");
let phase1 = phase0.run(&mut dkg, rng).await?;
// Wait for Phase 2
wait_for_phase(&dkg, 2).await?;
// Get the shares
let shares = dkg.get_shares().call().await?;
println!("Got {} shares...", shares.len());
let shares = parse_bundle(&shares)?;
println!("Parsed {} shares. Running Phase 2", shares.len());
let phase2 = phase1.run(&mut dkg, &shares).await?;
// Get the responses
let responses = dkg.get_responses().call().await?;
println!("Got {} responses...", responses.len());
let responses = parse_bundle(&responses)?;
println!("Parsed the responses. Getting result.");
// Run Phase 2
let result = match phase2.run(&mut dkg, &responses).await? {
Phase2Result::Output(out) => Ok(out),
// Run Phase 3 if Phase 2 errored
Phase2Result::GoToPhase3(phase3) => {
println!("There were complaints. Running Phase 3.");
wait_for_phase(&dkg, 3).await?;
let justifications = dkg.get_justifications().call().await?;
let justifications = parse_bundle(&justifications)?;
phase3.run(&mut dkg, &justifications).await
}
};
match result {
Ok(output) => |
Err(err) => Err(anyhow::anyhow!("DKG error: {}", err)),
}
}
#[derive(serde::Serialize, Debug)]
struct OutputJson {
#[serde(rename = "publicKey")]
public_key: String,
#[serde(rename = "publicPolynomial")]
public_polynomial: String,
#[serde(rename = "share")]
share: String,
}
async fn wait_for_phase<M: Middleware>(
dkg: &DKGContract<M>,
num: u64,
) -> Result<(), ContractError<M>> {
println!("Waiting for Phase {} to start", num);
loop {
let phase = dkg.in_phase().call().await?;
if phase.as_u64() == num {
break;
}
print!(".");
// 6s for 1 Celo block
tokio::time::sleep(std::time::Duration::from_millis(6000)).await;
}
println!("\nIn Phase {}. Moving to the next step.", num);
Ok(())
}
fn parse_bundle<D: serde::de::DeserializeOwned>(
bundle: &[ethers::prelude::Bytes],
) -> Result<Vec<D>> {
bundle
.iter()
.filter(|item| !item.to_vec().is_empty()) // filter out empty items
.map(|item| Ok(bincode::deserialize::<D>(&item.to_vec()[..])?))
.collect()
}
fn write_output<C: Curve, W: Write>(writer: W, out: &DKGOutput<C>) -> Result<()> {
let output = OutputJson {
public_key: hex::encode(&bincode::serialize(&out.public.public_key())?),
public_polynomial: hex::encode(&bincode::serialize(&out.public)?),
share: hex::encode(&bincode::serialize(&out.share)?),
};
serde_json::to_writer(writer, &output)?;
Ok(())
}
| {
println!("Success. Your share and threshold pubkey are ready.");
if let Some(path) = output_path {
let file = File::create(path)?;
write_output(&file, &output)?;
} else {
write_output(std::io::stdout(), &output)?;
}
Ok(())
} | conditional_block |
actions.rs | use crate::{
dkg_contract::{DKG as DKGContract, DKG_ABI},
opts::*,
};
use rand::{CryptoRng, RngCore};
use std::{fs::File, io::Write, sync::Arc};
use dkg_core::{
primitives::{joint_feldman::*, resharing::RDKG, *},
DKGPhase, Phase2Result,
};
use anyhow::Result;
use ethers::prelude::*;
use ethers::providers::Middleware;
use ethers::signers::LocalWallet;
use rustc_hex::{FromHex, ToHex};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use threshold_bls::{group::Curve, sig::Scheme};
use threshold_bls::{
poly::{Idx, PublicPoly},
sig::Share,
};
#[derive(Serialize, Deserialize, Debug)]
struct CeloKeypairJson {
address: Address,
#[serde(rename = "privateKey")]
private_key: String,
}
pub fn keygen<R>(opts: KeygenOpts, rng: &mut R) -> Result<()>
where
R: CryptoRng + RngCore,
{
let wallet = Wallet::new(rng);
let output = CeloKeypairJson {
private_key: hex::encode(&wallet.signer().to_bytes()),
address: wallet.address(),
};
if let Some(path) = opts.path {
let f = File::create(path)?;
serde_json::to_writer(&f, &output)?;
} else {
serde_json::to_writer(std::io::stdout(), &output)?;
}
Ok(())
}
pub async fn deploy(opts: DeployOpts) -> Result<()> {
// hard-code the contract's bytecode when deploying
let bytecode = include_str!["../dkg.bin"];
let bytecode = bytecode.from_hex::<Vec<u8>>()?;
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let abi = DKG_ABI.clone();
let factory = ContractFactory::new(abi, Bytes::from(bytecode), client);
let contract = factory
.deploy((opts.threshold as u64, opts.phase_duration as u64))?
.send()
.await?;
println!("Contract deployed at: {:?}", contract.address());
Ok(())
}
pub async fn allow(opts: AllowlistOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet); |
let contract = DKGContract::new(opts.contract_address, client);
for addr in opts.address {
let tx = contract.allowlist(addr).block(BlockNumber::Pending);
let tx = tx.send().await?.await?;
println!("Sent `allow` tx for {:?} (hash: {:?})", addr, tx);
}
Ok(())
}
pub async fn start(opts: StartOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
// Submit the tx and wait for the confirmation
let _tx_hash = contract.start().send().await?.await?;
Ok(())
}
pub async fn reshare<S, M, C, R>(opts: ReshareConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
M: Middleware,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
// we need the previous group and public poly for resharing
let previous_group = {
let previous_dkg = DKGContract::new(opts.previous_contract_address, client.clone());
let previous_group = previous_dkg.get_bls_keys().call().await?;
pubkeys_to_group::<C>(previous_group)?
};
let public_poly = opts.public_polynomial.from_hex::<Vec<u8>>()?;
let public_poly: PublicPoly<C> = bincode::deserialize(&public_poly)?;
let dkg = DKGContract::new(opts.contract_address, client.clone());
let (private_key, public_key) = S::keypair(rng);
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
let new_group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = if let Some(share) = opts.share {
let share = share.from_hex::<Vec<u8>>()?;
let share: Share<C::Scalar> = bincode::deserialize(&share)?;
let dkg_output = DKGOutput {
share,
qual: previous_group,
public: public_poly,
};
RDKG::new_from_share(private_key, dkg_output, new_group)
} else {
RDKG::new_member(private_key, previous_group, public_poly, new_group)
}?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
pub async fn run<S, C, R>(opts: DKGConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let dkg = DKGContract::new(opts.contract_address, client);
// 1. Generate the keys
let (private_key, public_key) = S::keypair(rng);
// 2. Register
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
// Get the group info
let group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = DKG::new(private_key, group)?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
async fn register<S: Scheme, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
public_key: &S::Public,
) -> Result<()> {
println!("Registering...");
let public_key_serialized = bincode::serialize(public_key)?;
let public_key_bytes = ethers::prelude::Bytes::from(public_key_serialized);
let _pending_tx = dkg.register(public_key_bytes).send().await?.await?;
// Wait for Phase 1
wait_for_phase(dkg, 1).await?;
Ok(())
}
async fn get_group<C: Curve, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
) -> Result<Group<C>> {
let group = dkg.get_bls_keys().call().await?;
let participants = dkg.get_participants().call().await?;
confirm_group(&group, participants)?;
let group = pubkeys_to_group::<C>(group)?;
Ok(group)
}
fn confirm_group(
pubkeys: &(U256, Vec<ethers::prelude::Bytes>),
participants: Vec<Address>,
) -> Result<()> {
// print some debug info
println!(
"Will run DKG with the group listed below and threshold {}",
pubkeys.0
);
for (bls_pubkey, address) in pubkeys.1.iter().zip(&participants) {
let key = bls_pubkey.to_vec().to_hex::<String>();
println!("{:?} -> {}", address, key)
}
if !clt::confirm(
"\nDoes the above group look good to you?",
false,
"\n",
true,
) {
return Err(anyhow::anyhow!("User rejected group choice."));
}
Ok(())
}
// Pass the result of `get_bls_keys` to convert the raw data to a group
fn pubkeys_to_group<C: Curve>(pubkeys: (U256, Vec<ethers::prelude::Bytes>)) -> Result<Group<C>> {
let nodes = pubkeys
.1
.into_iter()
.filter(|pubkey| !pubkey.to_vec().is_empty()) // skip users that did not register
.enumerate()
.map(|(i, pubkey)| {
let pubkey: C::Point = bincode::deserialize(&pubkey.to_vec()[..])?;
Ok(Node::<C>::new(i as Idx, pubkey))
})
.collect::<Result<_>>()?;
Ok(Group {
threshold: pubkeys.0.as_u64() as usize,
nodes,
})
}
// Shared helper for running the DKG in both normal and re-sharing mode
async fn run_dkg<P, C, R, M: Middleware + 'static>(
mut dkg: DKGContract<M>,
phase0: P,
rng: &mut R,
output_path: Option<String>,
) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
// S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
P: Phase0<C>,
R: RngCore,
{
// Run Phase 1 and publish to the chain
println!("Calculating and broadcasting our shares...");
let phase1 = phase0.run(&mut dkg, rng).await?;
// Wait for Phase 2
wait_for_phase(&dkg, 2).await?;
// Get the shares
let shares = dkg.get_shares().call().await?;
println!("Got {} shares...", shares.len());
let shares = parse_bundle(&shares)?;
println!("Parsed {} shares. Running Phase 2", shares.len());
let phase2 = phase1.run(&mut dkg, &shares).await?;
// Get the responses
let responses = dkg.get_responses().call().await?;
println!("Got {} responses...", responses.len());
let responses = parse_bundle(&responses)?;
println!("Parsed the responses. Getting result.");
// Run Phase 2
let result = match phase2.run(&mut dkg, &responses).await? {
Phase2Result::Output(out) => Ok(out),
// Run Phase 3 if Phase 2 errored
Phase2Result::GoToPhase3(phase3) => {
println!("There were complaints. Running Phase 3.");
wait_for_phase(&dkg, 3).await?;
let justifications = dkg.get_justifications().call().await?;
let justifications = parse_bundle(&justifications)?;
phase3.run(&mut dkg, &justifications).await
}
};
match result {
Ok(output) => {
println!("Success. Your share and threshold pubkey are ready.");
if let Some(path) = output_path {
let file = File::create(path)?;
write_output(&file, &output)?;
} else {
write_output(std::io::stdout(), &output)?;
}
Ok(())
}
Err(err) => Err(anyhow::anyhow!("DKG error: {}", err)),
}
}
#[derive(serde::Serialize, Debug)]
struct OutputJson {
#[serde(rename = "publicKey")]
public_key: String,
#[serde(rename = "publicPolynomial")]
public_polynomial: String,
#[serde(rename = "share")]
share: String,
}
async fn wait_for_phase<M: Middleware>(
dkg: &DKGContract<M>,
num: u64,
) -> Result<(), ContractError<M>> {
println!("Waiting for Phase {} to start", num);
loop {
let phase = dkg.in_phase().call().await?;
if phase.as_u64() == num {
break;
}
print!(".");
// 6s for 1 Celo block
tokio::time::sleep(std::time::Duration::from_millis(6000)).await;
}
println!("\nIn Phase {}. Moving to the next step.", num);
Ok(())
}
fn parse_bundle<D: serde::de::DeserializeOwned>(
bundle: &[ethers::prelude::Bytes],
) -> Result<Vec<D>> {
bundle
.iter()
.filter(|item| !item.to_vec().is_empty()) // filter out empty items
.map(|item| Ok(bincode::deserialize::<D>(&item.to_vec()[..])?))
.collect()
}
fn write_output<C: Curve, W: Write>(writer: W, out: &DKGOutput<C>) -> Result<()> {
let output = OutputJson {
public_key: hex::encode(&bincode::serialize(&out.public.public_key())?),
public_polynomial: hex::encode(&bincode::serialize(&out.public)?),
share: hex::encode(&bincode::serialize(&out.share)?),
};
serde_json::to_writer(writer, &output)?;
Ok(())
} | let client = Arc::new(client); | random_line_split |
actions.rs | use crate::{
dkg_contract::{DKG as DKGContract, DKG_ABI},
opts::*,
};
use rand::{CryptoRng, RngCore};
use std::{fs::File, io::Write, sync::Arc};
use dkg_core::{
primitives::{joint_feldman::*, resharing::RDKG, *},
DKGPhase, Phase2Result,
};
use anyhow::Result;
use ethers::prelude::*;
use ethers::providers::Middleware;
use ethers::signers::LocalWallet;
use rustc_hex::{FromHex, ToHex};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use threshold_bls::{group::Curve, sig::Scheme};
use threshold_bls::{
poly::{Idx, PublicPoly},
sig::Share,
};
#[derive(Serialize, Deserialize, Debug)]
struct CeloKeypairJson {
address: Address,
#[serde(rename = "privateKey")]
private_key: String,
}
pub fn keygen<R>(opts: KeygenOpts, rng: &mut R) -> Result<()>
where
R: CryptoRng + RngCore,
{
let wallet = Wallet::new(rng);
let output = CeloKeypairJson {
private_key: hex::encode(&wallet.signer().to_bytes()),
address: wallet.address(),
};
if let Some(path) = opts.path {
let f = File::create(path)?;
serde_json::to_writer(&f, &output)?;
} else {
serde_json::to_writer(std::io::stdout(), &output)?;
}
Ok(())
}
pub async fn deploy(opts: DeployOpts) -> Result<()> {
// hard-code the contract's bytecode when deploying
let bytecode = include_str!["../dkg.bin"];
let bytecode = bytecode.from_hex::<Vec<u8>>()?;
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let abi = DKG_ABI.clone();
let factory = ContractFactory::new(abi, Bytes::from(bytecode), client);
let contract = factory
.deploy((opts.threshold as u64, opts.phase_duration as u64))?
.send()
.await?;
println!("Contract deployed at: {:?}", contract.address());
Ok(())
}
pub async fn allow(opts: AllowlistOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
for addr in opts.address {
let tx = contract.allowlist(addr).block(BlockNumber::Pending);
let tx = tx.send().await?.await?;
println!("Sent `allow` tx for {:?} (hash: {:?})", addr, tx);
}
Ok(())
}
pub async fn start(opts: StartOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
// Submit the tx and wait for the confirmation
let _tx_hash = contract.start().send().await?.await?;
Ok(())
}
pub async fn reshare<S, M, C, R>(opts: ReshareConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
M: Middleware,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
// we need the previous group and public poly for resharing
let previous_group = {
let previous_dkg = DKGContract::new(opts.previous_contract_address, client.clone());
let previous_group = previous_dkg.get_bls_keys().call().await?;
pubkeys_to_group::<C>(previous_group)?
};
let public_poly = opts.public_polynomial.from_hex::<Vec<u8>>()?;
let public_poly: PublicPoly<C> = bincode::deserialize(&public_poly)?;
let dkg = DKGContract::new(opts.contract_address, client.clone());
let (private_key, public_key) = S::keypair(rng);
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
let new_group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = if let Some(share) = opts.share {
let share = share.from_hex::<Vec<u8>>()?;
let share: Share<C::Scalar> = bincode::deserialize(&share)?;
let dkg_output = DKGOutput {
share,
qual: previous_group,
public: public_poly,
};
RDKG::new_from_share(private_key, dkg_output, new_group)
} else {
RDKG::new_member(private_key, previous_group, public_poly, new_group)
}?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
pub async fn run<S, C, R>(opts: DKGConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let dkg = DKGContract::new(opts.contract_address, client);
// 1. Generate the keys
let (private_key, public_key) = S::keypair(rng);
// 2. Register
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
// Get the group info
let group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = DKG::new(private_key, group)?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
async fn register<S: Scheme, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
public_key: &S::Public,
) -> Result<()> {
println!("Registering...");
let public_key_serialized = bincode::serialize(public_key)?;
let public_key_bytes = ethers::prelude::Bytes::from(public_key_serialized);
let _pending_tx = dkg.register(public_key_bytes).send().await?.await?;
// Wait for Phase 1
wait_for_phase(dkg, 1).await?;
Ok(())
}
async fn get_group<C: Curve, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
) -> Result<Group<C>> {
let group = dkg.get_bls_keys().call().await?;
let participants = dkg.get_participants().call().await?;
confirm_group(&group, participants)?;
let group = pubkeys_to_group::<C>(group)?;
Ok(group)
}
fn confirm_group(
pubkeys: &(U256, Vec<ethers::prelude::Bytes>),
participants: Vec<Address>,
) -> Result<()> {
// print some debug info
println!(
"Will run DKG with the group listed below and threshold {}",
pubkeys.0
);
for (bls_pubkey, address) in pubkeys.1.iter().zip(&participants) {
let key = bls_pubkey.to_vec().to_hex::<String>();
println!("{:?} -> {}", address, key)
}
if !clt::confirm(
"\nDoes the above group look good to you?",
false,
"\n",
true,
) {
return Err(anyhow::anyhow!("User rejected group choice."));
}
Ok(())
}
// Pass the result of `get_bls_keys` to convert the raw data to a group
fn pubkeys_to_group<C: Curve>(pubkeys: (U256, Vec<ethers::prelude::Bytes>)) -> Result<Group<C>> {
let nodes = pubkeys
.1
.into_iter()
.filter(|pubkey| !pubkey.to_vec().is_empty()) // skip users that did not register
.enumerate()
.map(|(i, pubkey)| {
let pubkey: C::Point = bincode::deserialize(&pubkey.to_vec()[..])?;
Ok(Node::<C>::new(i as Idx, pubkey))
})
.collect::<Result<_>>()?;
Ok(Group {
threshold: pubkeys.0.as_u64() as usize,
nodes,
})
}
// Shared helper for running the DKG in both normal and re-sharing mode
async fn | <P, C, R, M: Middleware + 'static>(
mut dkg: DKGContract<M>,
phase0: P,
rng: &mut R,
output_path: Option<String>,
) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
// S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
P: Phase0<C>,
R: RngCore,
{
// Run Phase 1 and publish to the chain
println!("Calculating and broadcasting our shares...");
let phase1 = phase0.run(&mut dkg, rng).await?;
// Wait for Phase 2
wait_for_phase(&dkg, 2).await?;
// Get the shares
let shares = dkg.get_shares().call().await?;
println!("Got {} shares...", shares.len());
let shares = parse_bundle(&shares)?;
println!("Parsed {} shares. Running Phase 2", shares.len());
let phase2 = phase1.run(&mut dkg, &shares).await?;
// Get the responses
let responses = dkg.get_responses().call().await?;
println!("Got {} responses...", responses.len());
let responses = parse_bundle(&responses)?;
println!("Parsed the responses. Getting result.");
// Run Phase 2
let result = match phase2.run(&mut dkg, &responses).await? {
Phase2Result::Output(out) => Ok(out),
// Run Phase 3 if Phase 2 errored
Phase2Result::GoToPhase3(phase3) => {
println!("There were complaints. Running Phase 3.");
wait_for_phase(&dkg, 3).await?;
let justifications = dkg.get_justifications().call().await?;
let justifications = parse_bundle(&justifications)?;
phase3.run(&mut dkg, &justifications).await
}
};
match result {
Ok(output) => {
println!("Success. Your share and threshold pubkey are ready.");
if let Some(path) = output_path {
let file = File::create(path)?;
write_output(&file, &output)?;
} else {
write_output(std::io::stdout(), &output)?;
}
Ok(())
}
Err(err) => Err(anyhow::anyhow!("DKG error: {}", err)),
}
}
#[derive(serde::Serialize, Debug)]
struct OutputJson {
#[serde(rename = "publicKey")]
public_key: String,
#[serde(rename = "publicPolynomial")]
public_polynomial: String,
#[serde(rename = "share")]
share: String,
}
async fn wait_for_phase<M: Middleware>(
dkg: &DKGContract<M>,
num: u64,
) -> Result<(), ContractError<M>> {
println!("Waiting for Phase {} to start", num);
loop {
let phase = dkg.in_phase().call().await?;
if phase.as_u64() == num {
break;
}
print!(".");
// 6s for 1 Celo block
tokio::time::sleep(std::time::Duration::from_millis(6000)).await;
}
println!("\nIn Phase {}. Moving to the next step.", num);
Ok(())
}
fn parse_bundle<D: serde::de::DeserializeOwned>(
bundle: &[ethers::prelude::Bytes],
) -> Result<Vec<D>> {
bundle
.iter()
.filter(|item| !item.to_vec().is_empty()) // filter out empty items
.map(|item| Ok(bincode::deserialize::<D>(&item.to_vec()[..])?))
.collect()
}
fn write_output<C: Curve, W: Write>(writer: W, out: &DKGOutput<C>) -> Result<()> {
let output = OutputJson {
public_key: hex::encode(&bincode::serialize(&out.public.public_key())?),
public_polynomial: hex::encode(&bincode::serialize(&out.public)?),
share: hex::encode(&bincode::serialize(&out.share)?),
};
serde_json::to_writer(writer, &output)?;
Ok(())
}
| run_dkg | identifier_name |
transaction_verify_centre.rs | //! The `tvu` module implements the Transaction Validation Unit, a
//! multi-stage transaction validation pipeline in software.
//!
//! 1. BlobFetchStage
//! - Incoming blobs are picked up from the TVU sockets and repair socket.
//! 2. RetransmitStage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. ReplayStage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
//! 4. StorageStage
//! - Generating the keys used to encrypt the ledger and sample it for storage mining.
// use crate::bank_forks::BankForks;
use crate::treasury_forks::BankForks;
use crate::fetch_spot_stage::BlobFetchStage;
use crate::block_stream_service::BlockstreamService;
use crate::block_buffer_pool::{BlockBufferPool, CompletedSlotsReceiver};
use crate::node_group_info::NodeGroupInfo;
use crate::leader_arrange_cache::LeaderScheduleCache;
use crate::water_clock_recorder::WaterClockRecorder;
use crate::repeat_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use crate::storage_stage::{StorageStage, StorageState};
use morgan_interface::hash::Hash;
use morgan_interface::pubkey::Pubkey;
use morgan_interface::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
pub struct Tvu {
fetch_stage: BlobFetchStage,
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
blockstream_service: Option<BlockstreamService>,
storage_stage: StorageStage,
}
pub struct Sockets {
pub fetch: Vec<UdpSocket>,
pub repair: UdpSocket,
pub retransmit: UdpSocket,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `node_group_info` - The node_group_info state.
/// * `sockets` - fetch, repair, and retransmit sockets
/// * `block_buffer_pool` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
vote_account: &Pubkey,
voting_keypair: Option<&Arc<T>>,
storage_keypair: &Arc<Keypair>,
bank_forks: &Arc<RwLock<BankForks>>,
node_group_info: &Arc<RwLock<NodeGroupInfo>>,
sockets: Sockets,
block_buffer_pool: Arc<BlockBufferPool>,
storage_rotate_count: u64,
storage_state: &StorageState,
blockstream: Option<&String>,
ledger_signal_receiver: Receiver<bool>,
subscriptions: &Arc<RpcSubscriptions>,
waterclock_recorder: &Arc<Mutex<WaterClockRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
genesis_blockhash: &Hash,
completed_slots_receiver: CompletedSlotsReceiver,
) -> Self
where
T: 'static + KeypairUtil + Sync + Send,
{
let keypair: Arc<Keypair> = node_group_info
.read()
.expect("Unable to read from node_group_info during Tvu creation")
.keypair
.clone();
let Sockets {
repair: repair_socket,
fetch: fetch_sockets,
retransmit: retransmit_socket,
} = sockets;
let (blob_fetch_sender, blob_fetch_receiver) = channel();
let repair_socket = Arc::new(repair_socket);
let mut blob_sockets: Vec<Arc<UdpSocket>> =
fetch_sockets.into_iter().map(Arc::new).collect();
blob_sockets.push(repair_socket.clone());
let fetch_stage = BlobFetchStage::new_multi_socket(blob_sockets, &blob_fetch_sender, &exit);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
block_buffer_pool.clone(),
&node_group_info,
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
&exit,
genesis_blockhash,
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
);
let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
&keypair.pubkey(),
vote_account,
voting_keypair,
block_buffer_pool.clone(),
&bank_forks,
node_group_info.clone(),
&exit,
ledger_signal_receiver,
subscriptions,
waterclock_recorder,
leader_schedule_cache,
);
let blockstream_service = if blockstream.is_some() {
let blockstream_service = BlockstreamService::new(
slot_full_receiver,
block_buffer_pool.clone(),
blockstream.unwrap().to_string(),
&exit,
);
Some(blockstream_service)
} else {
None
};
let storage_stage = StorageStage::new(
storage_state,
root_slot_receiver,
Some(block_buffer_pool),
&keypair,
storage_keypair,
&exit,
&bank_forks,
storage_rotate_count,
&node_group_info,
);
Tvu {
fetch_stage,
retransmit_stage,
replay_stage,
blockstream_service,
storage_stage,
}
}
}
impl Service for Tvu {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
self.retransmit_stage.join()?;
self.fetch_stage.join()?;
self.storage_stage.join()?;
if self.blockstream_service.is_some() {
self.blockstream_service.unwrap().join()?;
}
self.replay_stage.join()?;
Ok(())
}
}
use std::{borrow::Cow, convert, ffi::OsStr, path::Path};
static LICENSE_HEADER: &str = "Copyright (c) The Libra Core Contributors\n\
SPDX-License-Identifier: Apache-2.0\n\
";
#[allow(dead_code)]
pub(super) fn has_license_header(file: &Path, contents: &str) -> Result<(), Cow<'static, str>> {
enum FileType {
Rust,
Shell,
Proto,
}
let file_type = match file
.extension()
.map(OsStr::to_str)
.and_then(convert::identity)
{
Some("rs") => FileType::Rust,
Some("sh") => FileType::Shell,
Some("proto") => FileType::Proto,
_ => return Ok(()),
};
// Determine if the file is missing the license header
let missing_header = match file_type {
FileType::Rust | FileType::Proto => {
let maybe_license = contents
.lines()
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("// "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
FileType::Shell => {
let maybe_license = contents
.lines() | .skip_while(|line| line.starts_with("#!"))
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("# "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
};
if missing_header {
return Err("missing a license header".into());
}
Ok(())
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::treasury_stage::create_test_recorder;
use crate::block_buffer_pool::get_tmp_ledger_path;
use crate::node_group_info::{NodeGroupInfo, Node};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use morgan_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
fn test_tvu_exit() {
morgan_logger::setup();
let leader = Node::new_localhost();
let target1_keypair = Keypair::new();
let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());
let starting_balance = 10_000;
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(starting_balance);
let bank_forks = BankForks::new(0, Bank::new(&genesis_block));
//start cluster_info1
let mut cluster_info1 = NodeGroupInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let block_buffer_pool_path = get_tmp_ledger_path!();
let (block_buffer_pool, l_receiver, completed_slots_receiver) =
BlockBufferPool::open_by_message(&block_buffer_pool_path)
.expect("Expected to successfully open ledger");
let block_buffer_pool = Arc::new(block_buffer_pool);
let bank = bank_forks.working_bank();
let (exit, waterclock_recorder, waterclock_service, _entry_receiver) =
create_test_recorder(&bank, &block_buffer_pool);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(&Arc::new(voting_keypair)),
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&cref1,
{
Sockets {
repair: target1.sockets.repair,
retransmit: target1.sockets.retransmit,
fetch: target1.sockets.tvu,
}
},
block_buffer_pool,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
l_receiver,
&Arc::new(RpcSubscriptions::default()),
&waterclock_recorder,
&leader_schedule_cache,
&exit,
&Hash::default(),
completed_slots_receiver,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
waterclock_service.join().unwrap();
}
} | random_line_split | |
transaction_verify_centre.rs | //! The `tvu` module implements the Transaction Validation Unit, a
//! multi-stage transaction validation pipeline in software.
//!
//! 1. BlobFetchStage
//! - Incoming blobs are picked up from the TVU sockets and repair socket.
//! 2. RetransmitStage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. ReplayStage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
//! 4. StorageStage
//! - Generating the keys used to encrypt the ledger and sample it for storage mining.
// use crate::bank_forks::BankForks;
use crate::treasury_forks::BankForks;
use crate::fetch_spot_stage::BlobFetchStage;
use crate::block_stream_service::BlockstreamService;
use crate::block_buffer_pool::{BlockBufferPool, CompletedSlotsReceiver};
use crate::node_group_info::NodeGroupInfo;
use crate::leader_arrange_cache::LeaderScheduleCache;
use crate::water_clock_recorder::WaterClockRecorder;
use crate::repeat_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use crate::storage_stage::{StorageStage, StorageState};
use morgan_interface::hash::Hash;
use morgan_interface::pubkey::Pubkey;
use morgan_interface::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
pub struct Tvu {
fetch_stage: BlobFetchStage,
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
blockstream_service: Option<BlockstreamService>,
storage_stage: StorageStage,
}
pub struct | {
pub fetch: Vec<UdpSocket>,
pub repair: UdpSocket,
pub retransmit: UdpSocket,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `node_group_info` - The node_group_info state.
/// * `sockets` - fetch, repair, and retransmit sockets
/// * `block_buffer_pool` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
vote_account: &Pubkey,
voting_keypair: Option<&Arc<T>>,
storage_keypair: &Arc<Keypair>,
bank_forks: &Arc<RwLock<BankForks>>,
node_group_info: &Arc<RwLock<NodeGroupInfo>>,
sockets: Sockets,
block_buffer_pool: Arc<BlockBufferPool>,
storage_rotate_count: u64,
storage_state: &StorageState,
blockstream: Option<&String>,
ledger_signal_receiver: Receiver<bool>,
subscriptions: &Arc<RpcSubscriptions>,
waterclock_recorder: &Arc<Mutex<WaterClockRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
genesis_blockhash: &Hash,
completed_slots_receiver: CompletedSlotsReceiver,
) -> Self
where
T: 'static + KeypairUtil + Sync + Send,
{
let keypair: Arc<Keypair> = node_group_info
.read()
.expect("Unable to read from node_group_info during Tvu creation")
.keypair
.clone();
let Sockets {
repair: repair_socket,
fetch: fetch_sockets,
retransmit: retransmit_socket,
} = sockets;
let (blob_fetch_sender, blob_fetch_receiver) = channel();
let repair_socket = Arc::new(repair_socket);
let mut blob_sockets: Vec<Arc<UdpSocket>> =
fetch_sockets.into_iter().map(Arc::new).collect();
blob_sockets.push(repair_socket.clone());
let fetch_stage = BlobFetchStage::new_multi_socket(blob_sockets, &blob_fetch_sender, &exit);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
block_buffer_pool.clone(),
&node_group_info,
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
&exit,
genesis_blockhash,
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
);
let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
&keypair.pubkey(),
vote_account,
voting_keypair,
block_buffer_pool.clone(),
&bank_forks,
node_group_info.clone(),
&exit,
ledger_signal_receiver,
subscriptions,
waterclock_recorder,
leader_schedule_cache,
);
let blockstream_service = if blockstream.is_some() {
let blockstream_service = BlockstreamService::new(
slot_full_receiver,
block_buffer_pool.clone(),
blockstream.unwrap().to_string(),
&exit,
);
Some(blockstream_service)
} else {
None
};
let storage_stage = StorageStage::new(
storage_state,
root_slot_receiver,
Some(block_buffer_pool),
&keypair,
storage_keypair,
&exit,
&bank_forks,
storage_rotate_count,
&node_group_info,
);
Tvu {
fetch_stage,
retransmit_stage,
replay_stage,
blockstream_service,
storage_stage,
}
}
}
impl Service for Tvu {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
self.retransmit_stage.join()?;
self.fetch_stage.join()?;
self.storage_stage.join()?;
if self.blockstream_service.is_some() {
self.blockstream_service.unwrap().join()?;
}
self.replay_stage.join()?;
Ok(())
}
}
use std::{borrow::Cow, convert, ffi::OsStr, path::Path};
static LICENSE_HEADER: &str = "Copyright (c) The Libra Core Contributors\n\
SPDX-License-Identifier: Apache-2.0\n\
";
#[allow(dead_code)]
pub(super) fn has_license_header(file: &Path, contents: &str) -> Result<(), Cow<'static, str>> {
enum FileType {
Rust,
Shell,
Proto,
}
let file_type = match file
.extension()
.map(OsStr::to_str)
.and_then(convert::identity)
{
Some("rs") => FileType::Rust,
Some("sh") => FileType::Shell,
Some("proto") => FileType::Proto,
_ => return Ok(()),
};
// Determine if the file is missing the license header
let missing_header = match file_type {
FileType::Rust | FileType::Proto => {
let maybe_license = contents
.lines()
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("// "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
FileType::Shell => {
let maybe_license = contents
.lines()
.skip_while(|line| line.starts_with("#!"))
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("# "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
};
if missing_header {
return Err("missing a license header".into());
}
Ok(())
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::treasury_stage::create_test_recorder;
use crate::block_buffer_pool::get_tmp_ledger_path;
use crate::node_group_info::{NodeGroupInfo, Node};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use morgan_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
fn test_tvu_exit() {
morgan_logger::setup();
let leader = Node::new_localhost();
let target1_keypair = Keypair::new();
let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());
let starting_balance = 10_000;
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(starting_balance);
let bank_forks = BankForks::new(0, Bank::new(&genesis_block));
//start cluster_info1
let mut cluster_info1 = NodeGroupInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let block_buffer_pool_path = get_tmp_ledger_path!();
let (block_buffer_pool, l_receiver, completed_slots_receiver) =
BlockBufferPool::open_by_message(&block_buffer_pool_path)
.expect("Expected to successfully open ledger");
let block_buffer_pool = Arc::new(block_buffer_pool);
let bank = bank_forks.working_bank();
let (exit, waterclock_recorder, waterclock_service, _entry_receiver) =
create_test_recorder(&bank, &block_buffer_pool);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(&Arc::new(voting_keypair)),
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&cref1,
{
Sockets {
repair: target1.sockets.repair,
retransmit: target1.sockets.retransmit,
fetch: target1.sockets.tvu,
}
},
block_buffer_pool,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
l_receiver,
&Arc::new(RpcSubscriptions::default()),
&waterclock_recorder,
&leader_schedule_cache,
&exit,
&Hash::default(),
completed_slots_receiver,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
waterclock_service.join().unwrap();
}
}
| Sockets | identifier_name |
transaction_verify_centre.rs | //! The `tvu` module implements the Transaction Validation Unit, a
//! multi-stage transaction validation pipeline in software.
//!
//! 1. BlobFetchStage
//! - Incoming blobs are picked up from the TVU sockets and repair socket.
//! 2. RetransmitStage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. ReplayStage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
//! 4. StorageStage
//! - Generating the keys used to encrypt the ledger and sample it for storage mining.
// use crate::bank_forks::BankForks;
use crate::treasury_forks::BankForks;
use crate::fetch_spot_stage::BlobFetchStage;
use crate::block_stream_service::BlockstreamService;
use crate::block_buffer_pool::{BlockBufferPool, CompletedSlotsReceiver};
use crate::node_group_info::NodeGroupInfo;
use crate::leader_arrange_cache::LeaderScheduleCache;
use crate::water_clock_recorder::WaterClockRecorder;
use crate::repeat_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use crate::storage_stage::{StorageStage, StorageState};
use morgan_interface::hash::Hash;
use morgan_interface::pubkey::Pubkey;
use morgan_interface::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
pub struct Tvu {
fetch_stage: BlobFetchStage,
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
blockstream_service: Option<BlockstreamService>,
storage_stage: StorageStage,
}
pub struct Sockets {
pub fetch: Vec<UdpSocket>,
pub repair: UdpSocket,
pub retransmit: UdpSocket,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `node_group_info` - The node_group_info state.
/// * `sockets` - fetch, repair, and retransmit sockets
/// * `block_buffer_pool` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
vote_account: &Pubkey,
voting_keypair: Option<&Arc<T>>,
storage_keypair: &Arc<Keypair>,
bank_forks: &Arc<RwLock<BankForks>>,
node_group_info: &Arc<RwLock<NodeGroupInfo>>,
sockets: Sockets,
block_buffer_pool: Arc<BlockBufferPool>,
storage_rotate_count: u64,
storage_state: &StorageState,
blockstream: Option<&String>,
ledger_signal_receiver: Receiver<bool>,
subscriptions: &Arc<RpcSubscriptions>,
waterclock_recorder: &Arc<Mutex<WaterClockRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
genesis_blockhash: &Hash,
completed_slots_receiver: CompletedSlotsReceiver,
) -> Self
where
T: 'static + KeypairUtil + Sync + Send,
|
}
impl Service for Tvu {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
self.retransmit_stage.join()?;
self.fetch_stage.join()?;
self.storage_stage.join()?;
if self.blockstream_service.is_some() {
self.blockstream_service.unwrap().join()?;
}
self.replay_stage.join()?;
Ok(())
}
}
use std::{borrow::Cow, convert, ffi::OsStr, path::Path};
static LICENSE_HEADER: &str = "Copyright (c) The Libra Core Contributors\n\
SPDX-License-Identifier: Apache-2.0\n\
";
#[allow(dead_code)]
pub(super) fn has_license_header(file: &Path, contents: &str) -> Result<(), Cow<'static, str>> {
enum FileType {
Rust,
Shell,
Proto,
}
let file_type = match file
.extension()
.map(OsStr::to_str)
.and_then(convert::identity)
{
Some("rs") => FileType::Rust,
Some("sh") => FileType::Shell,
Some("proto") => FileType::Proto,
_ => return Ok(()),
};
// Determine if the file is missing the license header
let missing_header = match file_type {
FileType::Rust | FileType::Proto => {
let maybe_license = contents
.lines()
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("// "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
FileType::Shell => {
let maybe_license = contents
.lines()
.skip_while(|line| line.starts_with("#!"))
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("# "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
};
if missing_header {
return Err("missing a license header".into());
}
Ok(())
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::treasury_stage::create_test_recorder;
use crate::block_buffer_pool::get_tmp_ledger_path;
use crate::node_group_info::{NodeGroupInfo, Node};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use morgan_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
fn test_tvu_exit() {
morgan_logger::setup();
let leader = Node::new_localhost();
let target1_keypair = Keypair::new();
let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());
let starting_balance = 10_000;
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(starting_balance);
let bank_forks = BankForks::new(0, Bank::new(&genesis_block));
//start cluster_info1
let mut cluster_info1 = NodeGroupInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let block_buffer_pool_path = get_tmp_ledger_path!();
let (block_buffer_pool, l_receiver, completed_slots_receiver) =
BlockBufferPool::open_by_message(&block_buffer_pool_path)
.expect("Expected to successfully open ledger");
let block_buffer_pool = Arc::new(block_buffer_pool);
let bank = bank_forks.working_bank();
let (exit, waterclock_recorder, waterclock_service, _entry_receiver) =
create_test_recorder(&bank, &block_buffer_pool);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(&Arc::new(voting_keypair)),
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&cref1,
{
Sockets {
repair: target1.sockets.repair,
retransmit: target1.sockets.retransmit,
fetch: target1.sockets.tvu,
}
},
block_buffer_pool,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
l_receiver,
&Arc::new(RpcSubscriptions::default()),
&waterclock_recorder,
&leader_schedule_cache,
&exit,
&Hash::default(),
completed_slots_receiver,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
waterclock_service.join().unwrap();
}
}
| {
let keypair: Arc<Keypair> = node_group_info
.read()
.expect("Unable to read from node_group_info during Tvu creation")
.keypair
.clone();
let Sockets {
repair: repair_socket,
fetch: fetch_sockets,
retransmit: retransmit_socket,
} = sockets;
let (blob_fetch_sender, blob_fetch_receiver) = channel();
let repair_socket = Arc::new(repair_socket);
let mut blob_sockets: Vec<Arc<UdpSocket>> =
fetch_sockets.into_iter().map(Arc::new).collect();
blob_sockets.push(repair_socket.clone());
let fetch_stage = BlobFetchStage::new_multi_socket(blob_sockets, &blob_fetch_sender, &exit);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
block_buffer_pool.clone(),
&node_group_info,
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
&exit,
genesis_blockhash,
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
);
let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
&keypair.pubkey(),
vote_account,
voting_keypair,
block_buffer_pool.clone(),
&bank_forks,
node_group_info.clone(),
&exit,
ledger_signal_receiver,
subscriptions,
waterclock_recorder,
leader_schedule_cache,
);
let blockstream_service = if blockstream.is_some() {
let blockstream_service = BlockstreamService::new(
slot_full_receiver,
block_buffer_pool.clone(),
blockstream.unwrap().to_string(),
&exit,
);
Some(blockstream_service)
} else {
None
};
let storage_stage = StorageStage::new(
storage_state,
root_slot_receiver,
Some(block_buffer_pool),
&keypair,
storage_keypair,
&exit,
&bank_forks,
storage_rotate_count,
&node_group_info,
);
Tvu {
fetch_stage,
retransmit_stage,
replay_stage,
blockstream_service,
storage_stage,
}
} | identifier_body |
transaction_verify_centre.rs | //! The `tvu` module implements the Transaction Validation Unit, a
//! multi-stage transaction validation pipeline in software.
//!
//! 1. BlobFetchStage
//! - Incoming blobs are picked up from the TVU sockets and repair socket.
//! 2. RetransmitStage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. ReplayStage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
//! 4. StorageStage
//! - Generating the keys used to encrypt the ledger and sample it for storage mining.
// use crate::bank_forks::BankForks;
use crate::treasury_forks::BankForks;
use crate::fetch_spot_stage::BlobFetchStage;
use crate::block_stream_service::BlockstreamService;
use crate::block_buffer_pool::{BlockBufferPool, CompletedSlotsReceiver};
use crate::node_group_info::NodeGroupInfo;
use crate::leader_arrange_cache::LeaderScheduleCache;
use crate::water_clock_recorder::WaterClockRecorder;
use crate::repeat_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use crate::storage_stage::{StorageStage, StorageState};
use morgan_interface::hash::Hash;
use morgan_interface::pubkey::Pubkey;
use morgan_interface::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
pub struct Tvu {
fetch_stage: BlobFetchStage,
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
blockstream_service: Option<BlockstreamService>,
storage_stage: StorageStage,
}
pub struct Sockets {
pub fetch: Vec<UdpSocket>,
pub repair: UdpSocket,
pub retransmit: UdpSocket,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `node_group_info` - The node_group_info state.
/// * `sockets` - fetch, repair, and retransmit sockets
/// * `block_buffer_pool` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
vote_account: &Pubkey,
voting_keypair: Option<&Arc<T>>,
storage_keypair: &Arc<Keypair>,
bank_forks: &Arc<RwLock<BankForks>>,
node_group_info: &Arc<RwLock<NodeGroupInfo>>,
sockets: Sockets,
block_buffer_pool: Arc<BlockBufferPool>,
storage_rotate_count: u64,
storage_state: &StorageState,
blockstream: Option<&String>,
ledger_signal_receiver: Receiver<bool>,
subscriptions: &Arc<RpcSubscriptions>,
waterclock_recorder: &Arc<Mutex<WaterClockRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
genesis_blockhash: &Hash,
completed_slots_receiver: CompletedSlotsReceiver,
) -> Self
where
T: 'static + KeypairUtil + Sync + Send,
{
let keypair: Arc<Keypair> = node_group_info
.read()
.expect("Unable to read from node_group_info during Tvu creation")
.keypair
.clone();
let Sockets {
repair: repair_socket,
fetch: fetch_sockets,
retransmit: retransmit_socket,
} = sockets;
let (blob_fetch_sender, blob_fetch_receiver) = channel();
let repair_socket = Arc::new(repair_socket);
let mut blob_sockets: Vec<Arc<UdpSocket>> =
fetch_sockets.into_iter().map(Arc::new).collect();
blob_sockets.push(repair_socket.clone());
let fetch_stage = BlobFetchStage::new_multi_socket(blob_sockets, &blob_fetch_sender, &exit);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
block_buffer_pool.clone(),
&node_group_info,
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
&exit,
genesis_blockhash,
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
);
let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
&keypair.pubkey(),
vote_account,
voting_keypair,
block_buffer_pool.clone(),
&bank_forks,
node_group_info.clone(),
&exit,
ledger_signal_receiver,
subscriptions,
waterclock_recorder,
leader_schedule_cache,
);
let blockstream_service = if blockstream.is_some() {
let blockstream_service = BlockstreamService::new(
slot_full_receiver,
block_buffer_pool.clone(),
blockstream.unwrap().to_string(),
&exit,
);
Some(blockstream_service)
} else {
None
};
let storage_stage = StorageStage::new(
storage_state,
root_slot_receiver,
Some(block_buffer_pool),
&keypair,
storage_keypair,
&exit,
&bank_forks,
storage_rotate_count,
&node_group_info,
);
Tvu {
fetch_stage,
retransmit_stage,
replay_stage,
blockstream_service,
storage_stage,
}
}
}
impl Service for Tvu {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
self.retransmit_stage.join()?;
self.fetch_stage.join()?;
self.storage_stage.join()?;
if self.blockstream_service.is_some() {
self.blockstream_service.unwrap().join()?;
}
self.replay_stage.join()?;
Ok(())
}
}
use std::{borrow::Cow, convert, ffi::OsStr, path::Path};
static LICENSE_HEADER: &str = "Copyright (c) The Libra Core Contributors\n\
SPDX-License-Identifier: Apache-2.0\n\
";
#[allow(dead_code)]
pub(super) fn has_license_header(file: &Path, contents: &str) -> Result<(), Cow<'static, str>> {
enum FileType {
Rust,
Shell,
Proto,
}
let file_type = match file
.extension()
.map(OsStr::to_str)
.and_then(convert::identity)
{
Some("rs") => FileType::Rust,
Some("sh") => FileType::Shell,
Some("proto") => FileType::Proto,
_ => return Ok(()),
};
// Determine if the file is missing the license header
let missing_header = match file_type {
FileType::Rust | FileType::Proto => {
let maybe_license = contents
.lines()
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("// "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
FileType::Shell => |
};
if missing_header {
return Err("missing a license header".into());
}
Ok(())
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::treasury_stage::create_test_recorder;
use crate::block_buffer_pool::get_tmp_ledger_path;
use crate::node_group_info::{NodeGroupInfo, Node};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use morgan_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
fn test_tvu_exit() {
morgan_logger::setup();
let leader = Node::new_localhost();
let target1_keypair = Keypair::new();
let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());
let starting_balance = 10_000;
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(starting_balance);
let bank_forks = BankForks::new(0, Bank::new(&genesis_block));
//start cluster_info1
let mut cluster_info1 = NodeGroupInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let block_buffer_pool_path = get_tmp_ledger_path!();
let (block_buffer_pool, l_receiver, completed_slots_receiver) =
BlockBufferPool::open_by_message(&block_buffer_pool_path)
.expect("Expected to successfully open ledger");
let block_buffer_pool = Arc::new(block_buffer_pool);
let bank = bank_forks.working_bank();
let (exit, waterclock_recorder, waterclock_service, _entry_receiver) =
create_test_recorder(&bank, &block_buffer_pool);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(&Arc::new(voting_keypair)),
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&cref1,
{
Sockets {
repair: target1.sockets.repair,
retransmit: target1.sockets.retransmit,
fetch: target1.sockets.tvu,
}
},
block_buffer_pool,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
l_receiver,
&Arc::new(RpcSubscriptions::default()),
&waterclock_recorder,
&leader_schedule_cache,
&exit,
&Hash::default(),
completed_slots_receiver,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
waterclock_service.join().unwrap();
}
}
| {
let maybe_license = contents
.lines()
.skip_while(|line| line.starts_with("#!"))
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("# "));
!LICENSE_HEADER.lines().eq(maybe_license)
} | conditional_block |
process.go | // Copyright 2018 The QOS Authors
package buyad
import (
"encoding/json"
"github.com/QOSGroup/qbase/txs"
qbasetypes "github.com/QOSGroup/qbase/types"
qostxs "github.com/QOSGroup/qos/module/bank/txs"
qostxtype "github.com/QOSGroup/qos/module/bank/types"
qostypes "github.com/QOSGroup/qos/types"
"github.com/QOSGroup/qstars/client/utils"
"github.com/QOSGroup/qstars/config"
"github.com/QOSGroup/qstars/types"
"github.com/QOSGroup/qstars/utility"
"github.com/QOSGroup/qstars/wire"
"github.com/QOSGroup/qstars/x/common"
"github.com/QOSGroup/qstars/x/jianqian"
"log"
"strconv"
"strings"
"time"
)
const coinsName = "QOS"
// BuyAdBackground 提交到链上
func BuyAdBackground(cdc *wire.Codec, txb string, timeout time.Duration) string {
ts := new(txs.TxStd)
err := cdc.UnmarshalJSON([]byte(txb), ts)
log.Printf("buyad.BuyAdBackground ts:%+v, err:%+v", ts, err)
if err != nil {
return common.InternalError(err.Error()).Marshal()
}
cliCtx := *config.GetCLIContext().QSCCliContext
_, commitresult, err := utils.SendTx(cliCtx, cdc, ts)
log.Printf("buyad.BuyAdBackground SendTx commitresult:%+v, err:%+v", commitresult, err)
if err != nil {
return common.NewErrorResult(common.ResultCodeInternalError, 0, "", err.Error()).Marshal()
}
height := strconv.FormatInt(commitresult.Height, 10)
code := common.ResultCodeSuccess
var reason string
var result interface{}
waittime, err := strconv.Atoi(config.GetCLIContext().Config.WaitingForQosResult)
if err != nil {
panic("WaitingForQosResult should be able to convert to integer." + err.Error())
}
counter := 0
for {
resultstr, err := fetchResult(cdc, height, commitresult.Hash.String())
log.Printf("fetchResult result:%s, err:%+v\n", resultstr, err)
if err != nil {
log.Printf("fetchResult error:%s\n", err.Error())
reason = err.Error()
code = common.ResultCodeInternalError
break
}
if resultstr != "" && resultstr != (BuyadStub{}).Name() {
log.Printf("fetchResult result:[%+v]\n", resultstr)
rs := []rune(resultstr)
index1 := strings.Index(resultstr, " ")
reason = ""
result = string(rs[index1+1:])
code = string(rs[:index1])
break
}
if counter >= waittime {
log.Println("time out")
reason = "time out"
if resultstr == "" {
code = common.ResultCodeQstarsTimeout
} else {
code = common.ResultCodeQOSTimeout
}
break
}
time.Sleep(500 * time.Millisecond)
counter++
}
if code != common.ResultCodeSuccess {
return common.NewErrorResult(code, commitresult.Height, commitresult.Hash.String(), reason).Marshal()
}
return common.NewSuccessResult(cdc, commitresult.Height, commitresult.Hash.String(), result).Marshal()
}
func fetchResult(cdc *wire.Codec, heigth1 string, tx1 string) (string, error) {
qstarskey := "heigth:" + heigth1 + ",hash:" + tx1
d, err := config.GetCLIContext().QSCCliContext.QueryStore([]byte(qstarskey), common.QSCResultMapperName)
if err != nil {
return "", err
}
if d == nil {
return "", nil
}
var res []byte
err = cdc.UnmarshalBinaryBare(d, &res)
if err != nil {
return "", err
}
return string(res), err
}
// BuyAd 投资广告
func BuyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) string {
var result common.Result
result.Code = common.ResultCodeSuccess
tx, berr := buyAd(cdc, chainId, articleHash, coins, privatekey, qosnonce, qscnonce)
if berr != nil {
log.Printf("buyAd err:%s", berr.Error())
result.Code = berr.Code()
result.Reason = berr.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(tx)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
}
func warpperInvestorTx(cdc *wire.Codec, articleHash string, amount int64) []qostxtype.TransItem {
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, articleHash)
var result []qostxtype.TransItem
log.Printf("buyAd warpperInvestorTx investors:%+v", investors)
if err == nil {
totalInvest := qbasetypes.NewInt(0)
for _, v := range investors {
totalInvest = totalInvest.Add(v.Invest)
}
log.Printf("buyAd warpperInvestorTx amount:%d, totalInvest:%d", amount, totalInvest.Int64())
if !totalInvest.IsZero() {
for _, v := range investors {
result = append(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: qbasetypes.NewInt(amount * v.Invest.Int64() / totalInvest.Int64())}}))
}
}
}
return result
}
//func getCommunityAddr(cdc *wire.Codec) (qbasetypes.Address, error) {
//config.GetServerConf().Community
// communityPri := config.GetCLIContext().Config.Community
// if communityPri == "" {
// return nil, errors.New("no community")
// }
//
// _, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
// community, err := types.AccAddressFromBech32(addrben32)
// if err != nil {
// return nil, err
// }
//
// return community, nil
//}
func mergeQSCs(q1, q2 qostypes.QSCs) qostypes.QSCs {
m := make(map[string]*qbasetypes.BaseCoin)
for _, v := range q1 {
m[v.Name] = v
}
var res qostypes.QSCs
for _, v := range q2 {
if q, ok := m[v.Name]; ok {
v.Amount.Add(q.Amount)
m[v.Name] = v
} else {
m[v.Name] = v
}
}
for _, v := range m {
res = append(res, v)
}
return res
}
func mergeReceivers(rs []qostxtype.TransItem) []qostxtype.TransItem {
var res []qostxtype.TransItem
m := make(map[string]qostxtype.TransItem)
for _, v := range rs {
if ti, ok := m[v.Address.String()]; ok {
v.QOS = v.QOS.Add(ti.QOS)
v.QSCs = mergeQSCs(v.QSCs, ti.QSCs)
m[v.Address.String()] = v
} else {
m[v.Address.String()] = v
}
}
for _, v := range m {
res = append(res, v)
}
log.Printf("buyad.mergeReceivers rs:%+v, res:%+v", rs, res)
return res
}
func warpperReceivers(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt,
investors jianqian.Investors, communityAddr qbasetypes.Address) []qostxtype.TransItem {
var result []qostxtype.TransItem
log.Printf("buyad warpperReceivers article:%+v", article)
investors = calculateRevenue(cdc, article, amount, investors, communityAddr)
for _, v := range investors {
if !v.Revenue.IsZero() {
result = append(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: v.Revenue}}))
}
}
return mergeReceivers(result)
}
// calculateInvestorRevenue 计算投资者收入
func calculateInvestorRevenue(cdc *wire.Codec, investors jianqian.Investors, amount qbasetypes.BigInt) jianqian.Investors {
log.Printf("buyAd calculateInvestorRevenue investors:%+v", investors)
totalInvest := investors.TotalInvest()
log.Printf("buyAd calculateInvestorRevenue amount:%s, totalInvest:%d", amount.String(), totalInvest.Int64())
curAmount := qbasetypes.NewInt(0)
if !totalInvest.IsZero() {
l := len(investors)
for i := 0; i < l; i++ {
var revenue qbasetypes.BigInt
if i+1 == l {
revenue = amount.Sub(curAmount)
} else {
revenue = amount.Mul(investors[i].Invest).Div(totalInvest)
}
investors[i].Revenue = revenue
curAmount = curAmount.Add(revenue)
log.Printf("buyad calculateRevenue investorAddr:%s invest:%d, revenue:%d",
investors[i].Address.String(), investors[i].Invest.Int64(), revenue.Int64())
}
}
return investors
}
// calculateRevenue 计算收入
func calculateRevenue(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt, is jianqian.Investors,
communityAddr qbasetypes.Address) jianqian.Investors {
var result []jianqian.Investor
log.Printf("buyad calculateRevenue article:%+v, amount:%d", article, amount.Int64())
// 作者地址
authorTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue Authoraddress:%s amount:%d", article.Authoraddress.String(), authorTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeAuthor, // 投资者类型
Address: article.Authoraddress, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: authorTotal, // 投资收益
})
// 原创作者地址
shareOriginalTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareOriginalAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue OriginalAuthor:%s amount:%d", article.OriginalAuthor.String(), shareOriginalTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeOriginalAuthor, // 投资者类型
Address: article.OriginalAuthor, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareOriginalTotal, // 投资收益
})
// 投资者收入分配
investorShouldTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareInvestor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue investorShouldTotal:%d", investorShouldTotal.Int64())
investors := calculateInvestorRevenue(cdc, is, investorShouldTotal)
result = append(result, investors...)
shareCommunityTotal := amount.Sub(authorTotal).Sub(shareOriginalTotal).Sub(investors.TotalRevenue())
log.Printf("buyad calculateRevenue communityAddr:%s amount:%d", communityAddr.String(), shareCommunityTotal.Int64())
// 社区收入比例
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeCommunity, // 投资者类型
Address: communityAddr, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareCommunityTotal, // 投资收益
})
return result
}
// buyAd 投资广告
func buyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) (*txs.TxStd, *BuyadErr) {
communityPri := config.GetCLIContext().Config.Community
if communityPri == "" {
return nil, NoCommunityErr
}
_, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
communityAddr, err := types.AccAddressFromBech32(addrben32)
if err != nil {
return nil, NewBuyadErr(NoCommunityErrCode, err.Error())
}
if articleHash == "" {
return nil, InvalidArticleErr
}
article, err := jianqian.QueryArticle(cdc, config.GetCLIContext().QSCCliContext, articleHash)
log.Printf("buyad.buyAd QueryArticle article:%+v, err:%+v", article, err)
if err != nil {
return nil, NewBuyadErr(InvalidArticleErrCode, err.Error())
}
articleBuy, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash)
log.Printf("buyad.buyAd QueryArticleBuyer articleBuy:%+v, err:%+v", articleBuy, err)
if err == nil {
if articleBuy.CheckStatus != jianqian.CheckStatusFail {
return nil, HasBeenBuyedErr
}
}
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, article.ArticleHash)
if err != nil {
investors = jianqian.Investors{}
}
if articleBuy == nil {
articleBuy = &jianqian.Buyer{}
}
cs, err := types.ParseCoins(coins)
if err != nil {
return nil, NewBuyadErr(CoinsErrCode, err.Error())
}
if len(cs) != 1 {
return nil, CoinsErr
}
for _, v := range cs {
if v.Denom != coinsName {
return nil, CoinsErr
}
}
var amount int64
_, addrben32, priv := utility.PubAddrRetrievalFromAmino(privatekey, cdc)
buyer, err := types.AccAddressFromBech32(addrben32)
var ccs []qbasetypes.BaseCoin
for _, coin := range cs {
amount = coin.Amount.Int64()
ccs = append(ccs, qbasetypes.BaseCoin{
Name: coin.Denom,
Amount: qbasetypes.NewInt(coin.Amount.Int64()),
})
}
qosnonce += 1
var transferTx qostxs.TxTransfer
transferTx.Senders = []qostxtype.TransItem{warpperTransItem(buyer, ccs)}
receivers := warpperReceivers(cdc, article, qbasetypes.NewInt(amount), investors, communityAddr)
transferTx.Receivers = receivers
gas := qbasetypes.NewInt(int64(config.MaxGas))
stx := txs.NewTxStd(transferTx, config.GetCLIContext().Config.QOSChainID, gas)
signature, _ := stx.SignTx(priv, qosnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QOSChainID)
stx.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature,
Nonce: qosnonce,
}}
qscnonce += 1
it := &BuyTx{}
it.ArticleHash = []byte(articleHash)
it.Std = stx
tx2 := txs.NewTxStd(it, config.GetCLIContext().Config.QSCChainID, stx.MaxGas)
signature2, _ := tx2.SignTx(priv, qscnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QSCChainID)
tx2.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature2,
Nonce: qscnonce,
}}
return tx2, nil
}
func warpperTransItem(addr qbasetypes.Address, coins []qbasetypes.BaseCoin) qostxtype.TransItem {
var ti qostxtype.TransItem
ti.Address = addr
ti.QOS = qbasetypes.NewInt(0)
for _, coin := range coins {
| pper(coin.Name) == "QOS" {
ti.QOS = ti.QOS.Add(coin.Amount)
} else {
ti.QSCs = append(ti.QSCs, &coin)
}
}
return ti
}
// RetrieveBuyer 查询购买者
func RetrieveBuyer(cdc *wire.Codec, articleHash string) string {
var result common.Result
result.Code = common.ResultCodeSuccess
buyer, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash)
if err != nil {
log.Printf("QueryArticleBuyer err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(buyer)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
}
| if strings.ToU | identifier_name |
process.go | // Copyright 2018 The QOS Authors
package buyad
import (
"encoding/json"
"github.com/QOSGroup/qbase/txs"
qbasetypes "github.com/QOSGroup/qbase/types"
qostxs "github.com/QOSGroup/qos/module/bank/txs"
qostxtype "github.com/QOSGroup/qos/module/bank/types"
qostypes "github.com/QOSGroup/qos/types"
"github.com/QOSGroup/qstars/client/utils"
"github.com/QOSGroup/qstars/config"
"github.com/QOSGroup/qstars/types"
"github.com/QOSGroup/qstars/utility"
"github.com/QOSGroup/qstars/wire"
"github.com/QOSGroup/qstars/x/common"
"github.com/QOSGroup/qstars/x/jianqian"
"log"
"strconv"
"strings"
"time"
)
const coinsName = "QOS"
// BuyAdBackground 提交到链上
func BuyAdBackground(cdc *wire.Codec, txb string, timeout time.Duration) string {
ts := new(txs.TxStd)
err := cdc.UnmarshalJSON([]byte(txb), ts)
log.Printf("buyad.BuyAdBackground ts:%+v, err:%+v", ts, err)
if err != nil {
return common.InternalError(err.Error()).Marshal()
}
cliCtx := *config.GetCLIContext().QSCCliContext
_, commitresult, err := utils.SendTx(cliCtx, cdc, ts)
log.Printf("buyad.BuyAdBackground SendTx commitresult:%+v, err:%+v", commitresult, err)
if err != nil {
return common.NewErrorResult(common.ResultCodeInternalError, 0, "", err.Error()).Marshal()
}
height := strconv.FormatInt(commitresult.Height, 10)
code := common.ResultCodeSuccess
var reason string
var result interface{}
waittime, err := strconv.Atoi(config.GetCLIContext().Config.WaitingForQosResult)
if err != nil {
panic("WaitingForQosResult should be able to convert to integer." + err.Error())
}
counter := 0
for {
resultstr, err := fetchResult(cdc, height, commitresult.Hash.String())
log.Printf("fetchResult result:%s, err:%+v\n", resultstr, err)
if err != nil {
log.Printf("fetchResult error:%s\n", err.Error())
reason = err.Error()
code = common.ResultCodeInternalError
break
}
if resultstr != "" && resultstr != (BuyadStub{}).Name() {
log.Printf("fetchResult result:[%+v]\n", resultstr)
rs := []rune(resultstr)
index1 := strings.Index(resultstr, " ")
reason = ""
result = string(rs[index1+1:])
code = string(rs[:index1])
break
}
if counter >= waittime {
log.Println("time out")
reason = "time out"
if resultstr == "" {
code = common.ResultCodeQstarsTimeout
} else {
code = common.ResultCodeQOSTimeout
}
break
}
time.Sleep(500 * time.Millisecond)
counter++
}
if code != common.ResultCodeSuccess {
return common.NewErrorResult(code, commitresult.Height, commitresult.Hash.String(), reason).Marshal()
}
return common.NewSuccessResult(cdc, commitresult.Height, commitresult.Hash.String(), result).Marshal()
}
func fetchResult(cdc *wire.Codec, heigth1 string, tx1 string) (string, error) {
qstarskey := "heigth:" + heigth1 + ",hash:" + tx1
d, err := config.GetCLIContext().QSCCliContext.QueryStore([]byte(qstarskey), common.QSCResultMapperName)
if err != nil {
return "", err
}
if d == nil {
return "", nil
}
var res []byte
err = cdc.UnmarshalBinaryBare(d, &res)
if err != nil {
return "", err
}
return string(res), err
}
// BuyAd 投资广告
func BuyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) string {
var result common.Result
result.Code = common.ResultCodeSuccess
tx, berr := buyAd(cdc, chainId, articleHash, coins, privatekey, qosnonce, qscnonce)
if berr != nil {
log.Printf("buyAd err:%s", berr.Error())
result.Code = berr.Code()
result.Reason = berr.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(tx)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
}
func warpperInvestorTx(cdc *wire.Codec, articleHash string, amount int64) []qostxtype.TransItem {
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, articleHash)
var result []qostxtype.TransItem
log.Printf("buyAd warpperInvestorTx investors:%+v", investors)
if err == nil {
totalInvest := qbasetypes.NewInt(0)
for _, v := range investors {
totalInvest = totalInvest.Add(v.Invest)
}
log.Printf("buyAd warpperInvestorTx amount:%d, totalInvest:%d", amount, totalInvest.Int64())
if !totalInvest.IsZero() {
for _, v := range investors {
result = append(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: qbasetypes.NewInt(amount * v.Invest.Int64() / totalInvest.Int64())}}))
}
}
}
return result
}
//func getCommunityAddr(cdc *wire.Codec) (qbasetypes.Address, error) {
//config.GetServerConf().Community
// communityPri := config.GetCLIContext().Config.Community
// if communityPri == "" {
// return nil, errors.New("no community")
// }
//
// _, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
// community, err := types.AccAddressFromBech32(addrben32)
// if err != nil {
// return nil, err
// }
//
// return community, nil
//}
func mergeQSCs(q1, q2 qostypes.QSCs) qostypes.QSCs {
m := make(map[string]*qbasetypes.BaseCoin)
for _, v := range q1 {
m[v.Name] = v
}
var res qostypes.QSCs
for _, v := range q2 {
if q, ok := m[v.Name]; ok {
v.Amount.Add(q.Amount)
m[v.Name] = v
} else {
m[v.Name] = v
}
}
for _, v := range m {
res = append(res, v)
}
return res
}
func mergeReceivers(rs []qostxtype.TransItem) []qostxtype.TransItem {
var res []qostxtype.TransItem
m := make(map[string]qostxtype.TransItem)
for _, v := range rs {
if ti, ok := m[v.Address.String()]; ok {
v.QOS = v.QOS.Add(ti.QOS)
v.QSCs = mergeQSCs(v.QSCs, ti.QSCs)
m[v.Address.String()] = v
} else {
m[v.Address.String()] = v
}
}
for _, v := range m {
res = append(res, v)
}
log.Printf("buyad.mergeReceivers rs:%+v, res:%+v", rs, res)
return res
}
func warpperReceivers(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt,
investors jianqian.Investors, communityAddr qbasetypes.Address) []qostxtype.TransItem {
var result []qostxtype.TransItem
log.Printf("buyad warpperReceivers article:%+v", article)
investors = calculateRevenue(cdc, article, amount, investors, communityAddr)
for _, v := range investors {
if !v.Revenue.IsZero() {
result = append(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: v.Revenue}}))
}
}
return mergeReceivers(result)
}
// calculateInvestorRevenue 计算投资者收入
func calculateInvestorRevenue(cdc *wire.Codec, investors jianqian.Investors, amount qbasetypes.BigInt) jianqian.Investors {
log.Printf("buyAd calculateInvestorRevenue investors:%+v", investors)
totalInvest := investors.TotalInvest()
log.Printf("buyAd calculateInvestorRevenue amount:%s, totalInvest:%d", amount.String(), totalInvest.Int64())
curAmount := qbasetypes.NewInt(0)
if !totalInvest.IsZero() {
l := len(investors)
for i := 0; i < l; i++ {
var revenue qbasetypes.BigInt
if i+1 == l {
revenue = amount.Sub(curAmount)
} else {
revenue = amount.Mul(investors[i].Invest).Div(totalInvest)
}
investors[i].Revenue = revenue
curAmount = curAmount.Add(revenue)
log.Printf("buyad calculateRevenue investorAddr:%s invest:%d, revenue:%d",
investors[i].Address.String(), investors[i].Invest.Int64(), revenue.Int64())
}
}
return investors
}
// calculateRevenue 计算收入
func calculateRevenue(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt, is jianqian.Investors,
communityAddr qbasetypes.Address) jianqian.Investors {
var result []jianqian.Investor
log.Printf("buyad calculateRevenue article:%+v, amount:%d", article, amount.Int64())
// 作者地址
authorTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue Authoraddress:%s amount:%d", article.Authoraddress.String(), authorTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeAuthor, // 投资者类型
Address: article.Authoraddress, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: authorTotal, // 投资收益
})
// 原创作者地址
shareOriginalTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareOriginalAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue OriginalAuthor:%s amount:%d", article.OriginalAuthor.String(), shareOriginalTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeOriginalAuthor, // 投资者类型
Address: article.OriginalAuthor, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareOriginalTotal, // 投资收益
})
// 投资者收入分配
investorShouldTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareInvestor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue investorShouldTotal:%d", investorShouldTotal.Int64())
investors := calculateInvestorRevenue(cdc, is, investorShouldTotal)
result = append(result, investors...)
shareCommunityTotal := amount.Sub(authorTotal).Sub(shareOriginalTotal).Sub(investors.TotalRevenue())
log.Printf("buyad calculateRevenue communityAddr:%s amount:%d", communityAddr.String(), shareCommunityTotal.Int64())
// 社区收入比例
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeCommunity, // 投资者类型
Address: communityAddr, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareCommunityTotal, // 投资收益
})
return result
}
// buyAd 投资广告
func buyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) (*txs.TxStd, *BuyadErr) {
communityPri := config.GetCLIContext().Config.Community
if communityPri == "" {
return nil, NoCommunityErr
}
_, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
communityAddr, err := types.AccAddressFromBech32(addrben32)
if err != nil {
return nil, NewBuyadErr(NoCommunityErrCode, err.Error())
}
if articleHash == "" {
return nil, InvalidArticleErr
}
article, err := jianqian.QueryArticle(cdc, config.GetCLIContext().QSCCliContext, articleHash)
log.Printf("buyad.buyAd QueryArticle article:%+v, err:%+v", article, err)
if err != nil {
return nil, NewBuyadErr(InvalidArticleErrCode, err.Error())
}
articleBuy, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash) | log.Printf("buyad.buyAd QueryArticleBuyer articleBuy:%+v, err:%+v", articleBuy, err)
if err == nil {
if articleBuy.CheckStatus != jianqian.CheckStatusFail {
return nil, HasBeenBuyedErr
}
}
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, article.ArticleHash)
if err != nil {
investors = jianqian.Investors{}
}
if articleBuy == nil {
articleBuy = &jianqian.Buyer{}
}
cs, err := types.ParseCoins(coins)
if err != nil {
return nil, NewBuyadErr(CoinsErrCode, err.Error())
}
if len(cs) != 1 {
return nil, CoinsErr
}
for _, v := range cs {
if v.Denom != coinsName {
return nil, CoinsErr
}
}
var amount int64
_, addrben32, priv := utility.PubAddrRetrievalFromAmino(privatekey, cdc)
buyer, err := types.AccAddressFromBech32(addrben32)
var ccs []qbasetypes.BaseCoin
for _, coin := range cs {
amount = coin.Amount.Int64()
ccs = append(ccs, qbasetypes.BaseCoin{
Name: coin.Denom,
Amount: qbasetypes.NewInt(coin.Amount.Int64()),
})
}
qosnonce += 1
var transferTx qostxs.TxTransfer
transferTx.Senders = []qostxtype.TransItem{warpperTransItem(buyer, ccs)}
receivers := warpperReceivers(cdc, article, qbasetypes.NewInt(amount), investors, communityAddr)
transferTx.Receivers = receivers
gas := qbasetypes.NewInt(int64(config.MaxGas))
stx := txs.NewTxStd(transferTx, config.GetCLIContext().Config.QOSChainID, gas)
signature, _ := stx.SignTx(priv, qosnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QOSChainID)
stx.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature,
Nonce: qosnonce,
}}
qscnonce += 1
it := &BuyTx{}
it.ArticleHash = []byte(articleHash)
it.Std = stx
tx2 := txs.NewTxStd(it, config.GetCLIContext().Config.QSCChainID, stx.MaxGas)
signature2, _ := tx2.SignTx(priv, qscnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QSCChainID)
tx2.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature2,
Nonce: qscnonce,
}}
return tx2, nil
}
func warpperTransItem(addr qbasetypes.Address, coins []qbasetypes.BaseCoin) qostxtype.TransItem {
var ti qostxtype.TransItem
ti.Address = addr
ti.QOS = qbasetypes.NewInt(0)
for _, coin := range coins {
if strings.ToUpper(coin.Name) == "QOS" {
ti.QOS = ti.QOS.Add(coin.Amount)
} else {
ti.QSCs = append(ti.QSCs, &coin)
}
}
return ti
}
// RetrieveBuyer 查询购买者
func RetrieveBuyer(cdc *wire.Codec, articleHash string) string {
var result common.Result
result.Code = common.ResultCodeSuccess
buyer, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash)
if err != nil {
log.Printf("QueryArticleBuyer err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(buyer)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
} | random_line_split | |
process.go | // Copyright 2018 The QOS Authors
package buyad
import (
"encoding/json"
"github.com/QOSGroup/qbase/txs"
qbasetypes "github.com/QOSGroup/qbase/types"
qostxs "github.com/QOSGroup/qos/module/bank/txs"
qostxtype "github.com/QOSGroup/qos/module/bank/types"
qostypes "github.com/QOSGroup/qos/types"
"github.com/QOSGroup/qstars/client/utils"
"github.com/QOSGroup/qstars/config"
"github.com/QOSGroup/qstars/types"
"github.com/QOSGroup/qstars/utility"
"github.com/QOSGroup/qstars/wire"
"github.com/QOSGroup/qstars/x/common"
"github.com/QOSGroup/qstars/x/jianqian"
"log"
"strconv"
"strings"
"time"
)
const coinsName = "QOS"
// BuyAdBackground 提交到链上
func BuyAdBackground(cdc *wire.Codec, txb string, timeout time.Duration) string {
ts := new(txs.TxStd)
err := cdc.UnmarshalJSON([]byte(txb), ts)
log.Printf("buyad.BuyAdBackground ts:%+v, err:%+v", ts, err)
if err != nil {
return common.InternalError(err.Error()).Marshal()
}
cliCtx := *config.GetCLIContext().QSCCliContext
_, commitresult, err := utils.SendTx(cliCtx, cdc, ts)
log.Printf("buyad.BuyAdBackground SendTx commitresult:%+v, err:%+v", commitresult, err)
if err != nil {
return common.NewErrorResult(common.ResultCodeInternalError, 0, "", err.Error()).Marshal()
}
height := strconv.FormatInt(commitresult.Height, 10)
code := common.ResultCodeSuccess
var reason string
var result interface{}
waittime, err := strconv.Atoi(config.GetCLIContext().Config.WaitingForQosResult)
if err != nil {
panic("WaitingForQosResult should be able to convert to integer." + err.Error())
}
counter := 0
for {
resultstr, err := fetchResult(cdc, height, commitresult.Hash.String())
log.Printf("fetchResult result:%s, err:%+v\n", resultstr, err)
if err != nil {
log.Printf("fetchResult error:%s\n", err.Error())
reason = err.Error()
code = common.ResultCodeInternalError
break
}
if resultstr != "" && resultstr != (BuyadStub{}).Name() {
log.Printf("fetchResult result:[%+v]\n", resultstr)
rs := []rune(resultstr)
index1 := strings.Index(resultstr, " ")
reason = ""
result = string(rs[index1+1:])
code = string(rs[:index1])
break
}
if counter >= waittime {
log.Println("time out")
reason = "time out"
if resultstr == "" {
code = common.ResultCodeQstarsTimeout
} else {
code = common.ResultCodeQOSTimeout
}
break
}
time.Sleep(500 * time.Millisecond)
counter++
}
if code != common.ResultCodeSuccess {
return common.NewErrorResult(code, commitresult.Height, commitresult.Hash.String(), reason).Marshal()
}
return common.NewSuccessResult(cdc, commitresult.Height, commitresult.Hash.String(), result).Marshal()
}
func fetchResult(cdc *wire.Codec, heigth1 string, tx1 string) (string, error) {
qstarskey := "heigth:" + heigth1 + ",hash:" + tx1
d, err := config.GetCLIContext().QSCCliContext.QueryStore([]byte(qstarskey), common.QSCResultMapperName)
if err != nil {
return "", err
}
if d == nil {
return "", nil
}
var res []byte
err = cdc.UnmarshalBinaryBare(d, &res)
if err != nil {
return "", err
}
return string(res), err
}
// BuyAd 投资广告
func BuyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) string {
var result common.Result
result.Code = common.ResultCodeSuccess
tx, berr := buyAd(cdc, chainId, articleHash, coins, privatekey, qosnonce, qscnonce)
if berr != nil {
log.Printf("buyAd err:%s", berr.Error())
result.Code = berr.Code()
result.Reason = berr.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(tx)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
}
func warpperInvestorTx(cdc *wire.Codec, articleHash string, amount int64) []qostxtype.TransItem {
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, articleHash)
var result []qostxtype.TransItem
log.Printf("buyAd warpperInvestorTx investors:%+v", investors)
if err == nil {
totalInvest := qbasetypes.NewInt(0)
for _, v := range investors {
totalInvest = totalInvest.Add(v.Invest)
}
log.Printf("buyAd warpperInvestorTx amount:%d, totalInvest:%d", amount, totalInvest.Int64())
if !totalInvest.IsZero() {
for _, v := range investors {
result = append(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: qbasetypes.NewInt(amount * v.Invest.Int64() / totalInvest.Int64())}}))
}
}
}
return result
}
//func getCommunityAddr(cdc *wire.Codec) (qbasetypes.Address, error) {
//config.GetServerConf().Community
// communityPri := config.GetCLIContext().Config.Community
// if communityPri == "" {
// return nil, errors.New("no community")
// }
//
// _, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
// community, err := types.AccAddressFromBech32(addrben32)
// if err != nil {
// return nil, err
// }
//
// return community, nil
//}
func mergeQSCs(q1, q2 qostypes.QSCs) qostypes.QSCs {
m := make(map[string]*qbasetypes.BaseCoin)
for _, v := range q1 {
m[v.Name] = v
}
var res qostypes.QSCs
for _, v := range q2 {
if q, ok := m[v.Name]; ok {
v.Amount.Add(q.Amount)
m[v.Name] = v
} else {
m[v.Name] = v
}
}
for _, v := range m {
res = append(res, v)
}
return res
}
func mergeReceivers(rs []qostxtype.TransItem) []qostxtype.TransItem {
var res []qostxtype.TransItem
m := make(map[string]qostxtype.TransItem)
for _, v := range rs {
if ti, ok := m[v.Address.String()]; ok {
v.QOS = v.QOS.Add(ti.QOS)
v.QSCs = mergeQSCs(v.QSCs, ti.QSCs)
m[v.Address.String()] = v
} else {
m[v.Address.String()] = v
}
}
for _, v := range m {
res = append(res, v)
}
log.Printf("buyad.mergeReceivers rs:%+v, res:%+v", rs, res)
return res
}
func warpperReceivers(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt,
investors jianqian.Investors, communityAddr qbasetypes.Address) []qostxtype.TransItem {
var result []qostxtype.TransItem
log.Printf("buyad warpperReceivers article:%+v", article)
investors = calculateRevenue(cdc, article, amount, investors, communityAddr)
for _, v := range investors {
if !v.Revenue.IsZero() {
result = appe | Receivers(result)
}
// calculateInvestorRevenue 计算投资者收入
func calculateInvestorRevenue(cdc *wire.Codec, investors jianqian.Investors, amount qbasetypes.BigInt) jianqian.Investors {
log.Printf("buyAd calculateInvestorRevenue investors:%+v", investors)
totalInvest := investors.TotalInvest()
log.Printf("buyAd calculateInvestorRevenue amount:%s, totalInvest:%d", amount.String(), totalInvest.Int64())
curAmount := qbasetypes.NewInt(0)
if !totalInvest.IsZero() {
l := len(investors)
for i := 0; i < l; i++ {
var revenue qbasetypes.BigInt
if i+1 == l {
revenue = amount.Sub(curAmount)
} else {
revenue = amount.Mul(investors[i].Invest).Div(totalInvest)
}
investors[i].Revenue = revenue
curAmount = curAmount.Add(revenue)
log.Printf("buyad calculateRevenue investorAddr:%s invest:%d, revenue:%d",
investors[i].Address.String(), investors[i].Invest.Int64(), revenue.Int64())
}
}
return investors
}
// calculateRevenue 计算收入
func calculateRevenue(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt, is jianqian.Investors,
communityAddr qbasetypes.Address) jianqian.Investors {
var result []jianqian.Investor
log.Printf("buyad calculateRevenue article:%+v, amount:%d", article, amount.Int64())
// 作者地址
authorTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue Authoraddress:%s amount:%d", article.Authoraddress.String(), authorTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeAuthor, // 投资者类型
Address: article.Authoraddress, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: authorTotal, // 投资收益
})
// 原创作者地址
shareOriginalTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareOriginalAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue OriginalAuthor:%s amount:%d", article.OriginalAuthor.String(), shareOriginalTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeOriginalAuthor, // 投资者类型
Address: article.OriginalAuthor, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareOriginalTotal, // 投资收益
})
// 投资者收入分配
investorShouldTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareInvestor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue investorShouldTotal:%d", investorShouldTotal.Int64())
investors := calculateInvestorRevenue(cdc, is, investorShouldTotal)
result = append(result, investors...)
shareCommunityTotal := amount.Sub(authorTotal).Sub(shareOriginalTotal).Sub(investors.TotalRevenue())
log.Printf("buyad calculateRevenue communityAddr:%s amount:%d", communityAddr.String(), shareCommunityTotal.Int64())
// 社区收入比例
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeCommunity, // 投资者类型
Address: communityAddr, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareCommunityTotal, // 投资收益
})
return result
}
// buyAd 投资广告
func buyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) (*txs.TxStd, *BuyadErr) {
communityPri := config.GetCLIContext().Config.Community
if communityPri == "" {
return nil, NoCommunityErr
}
_, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
communityAddr, err := types.AccAddressFromBech32(addrben32)
if err != nil {
return nil, NewBuyadErr(NoCommunityErrCode, err.Error())
}
if articleHash == "" {
return nil, InvalidArticleErr
}
article, err := jianqian.QueryArticle(cdc, config.GetCLIContext().QSCCliContext, articleHash)
log.Printf("buyad.buyAd QueryArticle article:%+v, err:%+v", article, err)
if err != nil {
return nil, NewBuyadErr(InvalidArticleErrCode, err.Error())
}
articleBuy, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash)
log.Printf("buyad.buyAd QueryArticleBuyer articleBuy:%+v, err:%+v", articleBuy, err)
if err == nil {
if articleBuy.CheckStatus != jianqian.CheckStatusFail {
return nil, HasBeenBuyedErr
}
}
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, article.ArticleHash)
if err != nil {
investors = jianqian.Investors{}
}
if articleBuy == nil {
articleBuy = &jianqian.Buyer{}
}
cs, err := types.ParseCoins(coins)
if err != nil {
return nil, NewBuyadErr(CoinsErrCode, err.Error())
}
if len(cs) != 1 {
return nil, CoinsErr
}
for _, v := range cs {
if v.Denom != coinsName {
return nil, CoinsErr
}
}
var amount int64
_, addrben32, priv := utility.PubAddrRetrievalFromAmino(privatekey, cdc)
buyer, err := types.AccAddressFromBech32(addrben32)
var ccs []qbasetypes.BaseCoin
for _, coin := range cs {
amount = coin.Amount.Int64()
ccs = append(ccs, qbasetypes.BaseCoin{
Name: coin.Denom,
Amount: qbasetypes.NewInt(coin.Amount.Int64()),
})
}
qosnonce += 1
var transferTx qostxs.TxTransfer
transferTx.Senders = []qostxtype.TransItem{warpperTransItem(buyer, ccs)}
receivers := warpperReceivers(cdc, article, qbasetypes.NewInt(amount), investors, communityAddr)
transferTx.Receivers = receivers
gas := qbasetypes.NewInt(int64(config.MaxGas))
stx := txs.NewTxStd(transferTx, config.GetCLIContext().Config.QOSChainID, gas)
signature, _ := stx.SignTx(priv, qosnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QOSChainID)
stx.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature,
Nonce: qosnonce,
}}
qscnonce += 1
it := &BuyTx{}
it.ArticleHash = []byte(articleHash)
it.Std = stx
tx2 := txs.NewTxStd(it, config.GetCLIContext().Config.QSCChainID, stx.MaxGas)
signature2, _ := tx2.SignTx(priv, qscnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QSCChainID)
tx2.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature2,
Nonce: qscnonce,
}}
return tx2, nil
}
func warpperTransItem(addr qbasetypes.Address, coins []qbasetypes.BaseCoin) qostxtype.TransItem {
var ti qostxtype.TransItem
ti.Address = addr
ti.QOS = qbasetypes.NewInt(0)
for _, coin := range coins {
if strings.ToUpper(coin.Name) == "QOS" {
ti.QOS = ti.QOS.Add(coin.Amount)
} else {
ti.QSCs = append(ti.QSCs, &coin)
}
}
return ti
}
// RetrieveBuyer 查询购买者
func RetrieveBuyer(cdc *wire.Codec, articleHash string) string {
var result common.Result
result.Code = common.ResultCodeSuccess
buyer, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash)
if err != nil {
log.Printf("QueryArticleBuyer err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(buyer)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
}
| nd(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: v.Revenue}}))
}
}
return merge | conditional_block |
process.go | // Copyright 2018 The QOS Authors
package buyad
import (
"encoding/json"
"github.com/QOSGroup/qbase/txs"
qbasetypes "github.com/QOSGroup/qbase/types"
qostxs "github.com/QOSGroup/qos/module/bank/txs"
qostxtype "github.com/QOSGroup/qos/module/bank/types"
qostypes "github.com/QOSGroup/qos/types"
"github.com/QOSGroup/qstars/client/utils"
"github.com/QOSGroup/qstars/config"
"github.com/QOSGroup/qstars/types"
"github.com/QOSGroup/qstars/utility"
"github.com/QOSGroup/qstars/wire"
"github.com/QOSGroup/qstars/x/common"
"github.com/QOSGroup/qstars/x/jianqian"
"log"
"strconv"
"strings"
"time"
)
const coinsName = "QOS"
// BuyAdBackground 提交到链上
func BuyAdBackground(cdc *wire.Codec, txb string, timeout time.Duration) string {
ts := n | chResult(cdc *wire.Codec, heigth1 string, tx1 string) (string, error) {
qstarskey := "heigth:" + heigth1 + ",hash:" + tx1
d, err := config.GetCLIContext().QSCCliContext.QueryStore([]byte(qstarskey), common.QSCResultMapperName)
if err != nil {
return "", err
}
if d == nil {
return "", nil
}
var res []byte
err = cdc.UnmarshalBinaryBare(d, &res)
if err != nil {
return "", err
}
return string(res), err
}
// BuyAd 投资广告
func BuyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) string {
var result common.Result
result.Code = common.ResultCodeSuccess
tx, berr := buyAd(cdc, chainId, articleHash, coins, privatekey, qosnonce, qscnonce)
if berr != nil {
log.Printf("buyAd err:%s", berr.Error())
result.Code = berr.Code()
result.Reason = berr.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(tx)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
}
func warpperInvestorTx(cdc *wire.Codec, articleHash string, amount int64) []qostxtype.TransItem {
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, articleHash)
var result []qostxtype.TransItem
log.Printf("buyAd warpperInvestorTx investors:%+v", investors)
if err == nil {
totalInvest := qbasetypes.NewInt(0)
for _, v := range investors {
totalInvest = totalInvest.Add(v.Invest)
}
log.Printf("buyAd warpperInvestorTx amount:%d, totalInvest:%d", amount, totalInvest.Int64())
if !totalInvest.IsZero() {
for _, v := range investors {
result = append(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: qbasetypes.NewInt(amount * v.Invest.Int64() / totalInvest.Int64())}}))
}
}
}
return result
}
//func getCommunityAddr(cdc *wire.Codec) (qbasetypes.Address, error) {
//config.GetServerConf().Community
// communityPri := config.GetCLIContext().Config.Community
// if communityPri == "" {
// return nil, errors.New("no community")
// }
//
// _, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
// community, err := types.AccAddressFromBech32(addrben32)
// if err != nil {
// return nil, err
// }
//
// return community, nil
//}
func mergeQSCs(q1, q2 qostypes.QSCs) qostypes.QSCs {
m := make(map[string]*qbasetypes.BaseCoin)
for _, v := range q1 {
m[v.Name] = v
}
var res qostypes.QSCs
for _, v := range q2 {
if q, ok := m[v.Name]; ok {
v.Amount.Add(q.Amount)
m[v.Name] = v
} else {
m[v.Name] = v
}
}
for _, v := range m {
res = append(res, v)
}
return res
}
func mergeReceivers(rs []qostxtype.TransItem) []qostxtype.TransItem {
var res []qostxtype.TransItem
m := make(map[string]qostxtype.TransItem)
for _, v := range rs {
if ti, ok := m[v.Address.String()]; ok {
v.QOS = v.QOS.Add(ti.QOS)
v.QSCs = mergeQSCs(v.QSCs, ti.QSCs)
m[v.Address.String()] = v
} else {
m[v.Address.String()] = v
}
}
for _, v := range m {
res = append(res, v)
}
log.Printf("buyad.mergeReceivers rs:%+v, res:%+v", rs, res)
return res
}
func warpperReceivers(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt,
investors jianqian.Investors, communityAddr qbasetypes.Address) []qostxtype.TransItem {
var result []qostxtype.TransItem
log.Printf("buyad warpperReceivers article:%+v", article)
investors = calculateRevenue(cdc, article, amount, investors, communityAddr)
for _, v := range investors {
if !v.Revenue.IsZero() {
result = append(
result,
warpperTransItem(
v.Address,
[]qbasetypes.BaseCoin{{Name: coinsName, Amount: v.Revenue}}))
}
}
return mergeReceivers(result)
}
// calculateInvestorRevenue 计算投资者收入
func calculateInvestorRevenue(cdc *wire.Codec, investors jianqian.Investors, amount qbasetypes.BigInt) jianqian.Investors {
log.Printf("buyAd calculateInvestorRevenue investors:%+v", investors)
totalInvest := investors.TotalInvest()
log.Printf("buyAd calculateInvestorRevenue amount:%s, totalInvest:%d", amount.String(), totalInvest.Int64())
curAmount := qbasetypes.NewInt(0)
if !totalInvest.IsZero() {
l := len(investors)
for i := 0; i < l; i++ {
var revenue qbasetypes.BigInt
if i+1 == l {
revenue = amount.Sub(curAmount)
} else {
revenue = amount.Mul(investors[i].Invest).Div(totalInvest)
}
investors[i].Revenue = revenue
curAmount = curAmount.Add(revenue)
log.Printf("buyad calculateRevenue investorAddr:%s invest:%d, revenue:%d",
investors[i].Address.String(), investors[i].Invest.Int64(), revenue.Int64())
}
}
return investors
}
// calculateRevenue 计算收入
func calculateRevenue(cdc *wire.Codec, article *jianqian.Articles, amount qbasetypes.BigInt, is jianqian.Investors,
communityAddr qbasetypes.Address) jianqian.Investors {
var result []jianqian.Investor
log.Printf("buyad calculateRevenue article:%+v, amount:%d", article, amount.Int64())
// 作者地址
authorTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue Authoraddress:%s amount:%d", article.Authoraddress.String(), authorTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeAuthor, // 投资者类型
Address: article.Authoraddress, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: authorTotal, // 投资收益
})
// 原创作者地址
shareOriginalTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareOriginalAuthor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue OriginalAuthor:%s amount:%d", article.OriginalAuthor.String(), shareOriginalTotal.Int64())
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeOriginalAuthor, // 投资者类型
Address: article.OriginalAuthor, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareOriginalTotal, // 投资收益
})
// 投资者收入分配
investorShouldTotal := amount.Mul(qbasetypes.NewInt(int64(article.ShareInvestor))).Div(qbasetypes.NewInt(100))
log.Printf("buyad calculateRevenue investorShouldTotal:%d", investorShouldTotal.Int64())
investors := calculateInvestorRevenue(cdc, is, investorShouldTotal)
result = append(result, investors...)
shareCommunityTotal := amount.Sub(authorTotal).Sub(shareOriginalTotal).Sub(investors.TotalRevenue())
log.Printf("buyad calculateRevenue communityAddr:%s amount:%d", communityAddr.String(), shareCommunityTotal.Int64())
// 社区收入比例
result = append(
result,
jianqian.Investor{
InvestorType: jianqian.InvestorTypeCommunity, // 投资者类型
Address: communityAddr, // 投资者地址
Invest: qbasetypes.NewInt(0), // 投资金额
Revenue: shareCommunityTotal, // 投资收益
})
return result
}
// buyAd 投资广告
func buyAd(cdc *wire.Codec, chainId, articleHash, coins, privatekey string, qosnonce, qscnonce int64) (*txs.TxStd, *BuyadErr) {
communityPri := config.GetCLIContext().Config.Community
if communityPri == "" {
return nil, NoCommunityErr
}
_, addrben32, _ := utility.PubAddrRetrievalFromAmino(communityPri, cdc)
communityAddr, err := types.AccAddressFromBech32(addrben32)
if err != nil {
return nil, NewBuyadErr(NoCommunityErrCode, err.Error())
}
if articleHash == "" {
return nil, InvalidArticleErr
}
article, err := jianqian.QueryArticle(cdc, config.GetCLIContext().QSCCliContext, articleHash)
log.Printf("buyad.buyAd QueryArticle article:%+v, err:%+v", article, err)
if err != nil {
return nil, NewBuyadErr(InvalidArticleErrCode, err.Error())
}
articleBuy, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash)
log.Printf("buyad.buyAd QueryArticleBuyer articleBuy:%+v, err:%+v", articleBuy, err)
if err == nil {
if articleBuy.CheckStatus != jianqian.CheckStatusFail {
return nil, HasBeenBuyedErr
}
}
investors, err := jianqian.ListInvestors(config.GetCLIContext().QSCCliContext, cdc, article.ArticleHash)
if err != nil {
investors = jianqian.Investors{}
}
if articleBuy == nil {
articleBuy = &jianqian.Buyer{}
}
cs, err := types.ParseCoins(coins)
if err != nil {
return nil, NewBuyadErr(CoinsErrCode, err.Error())
}
if len(cs) != 1 {
return nil, CoinsErr
}
for _, v := range cs {
if v.Denom != coinsName {
return nil, CoinsErr
}
}
var amount int64
_, addrben32, priv := utility.PubAddrRetrievalFromAmino(privatekey, cdc)
buyer, err := types.AccAddressFromBech32(addrben32)
var ccs []qbasetypes.BaseCoin
for _, coin := range cs {
amount = coin.Amount.Int64()
ccs = append(ccs, qbasetypes.BaseCoin{
Name: coin.Denom,
Amount: qbasetypes.NewInt(coin.Amount.Int64()),
})
}
qosnonce += 1
var transferTx qostxs.TxTransfer
transferTx.Senders = []qostxtype.TransItem{warpperTransItem(buyer, ccs)}
receivers := warpperReceivers(cdc, article, qbasetypes.NewInt(amount), investors, communityAddr)
transferTx.Receivers = receivers
gas := qbasetypes.NewInt(int64(config.MaxGas))
stx := txs.NewTxStd(transferTx, config.GetCLIContext().Config.QOSChainID, gas)
signature, _ := stx.SignTx(priv, qosnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QOSChainID)
stx.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature,
Nonce: qosnonce,
}}
qscnonce += 1
it := &BuyTx{}
it.ArticleHash = []byte(articleHash)
it.Std = stx
tx2 := txs.NewTxStd(it, config.GetCLIContext().Config.QSCChainID, stx.MaxGas)
signature2, _ := tx2.SignTx(priv, qscnonce, config.GetCLIContext().Config.QSCChainID, config.GetCLIContext().Config.QSCChainID)
tx2.Signature = []txs.Signature{txs.Signature{
Pubkey: priv.PubKey(),
Signature: signature2,
Nonce: qscnonce,
}}
return tx2, nil
}
func warpperTransItem(addr qbasetypes.Address, coins []qbasetypes.BaseCoin) qostxtype.TransItem {
var ti qostxtype.TransItem
ti.Address = addr
ti.QOS = qbasetypes.NewInt(0)
for _, coin := range coins {
if strings.ToUpper(coin.Name) == "QOS" {
ti.QOS = ti.QOS.Add(coin.Amount)
} else {
ti.QSCs = append(ti.QSCs, &coin)
}
}
return ti
}
// RetrieveBuyer 查询购买者
func RetrieveBuyer(cdc *wire.Codec, articleHash string) string {
var result common.Result
result.Code = common.ResultCodeSuccess
buyer, err := jianqian.QueryArticleBuyer(cdc, config.GetCLIContext().QSCCliContext, articleHash)
if err != nil {
log.Printf("QueryArticleBuyer err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
js, err := cdc.MarshalJSON(buyer)
if err != nil {
log.Printf("buyAd err:%s", err.Error())
result.Code = common.ResultCodeInternalError
result.Reason = err.Error()
return result.Marshal()
}
result.Result = json.RawMessage(js)
return result.Marshal()
}
| ew(txs.TxStd)
err := cdc.UnmarshalJSON([]byte(txb), ts)
log.Printf("buyad.BuyAdBackground ts:%+v, err:%+v", ts, err)
if err != nil {
return common.InternalError(err.Error()).Marshal()
}
cliCtx := *config.GetCLIContext().QSCCliContext
_, commitresult, err := utils.SendTx(cliCtx, cdc, ts)
log.Printf("buyad.BuyAdBackground SendTx commitresult:%+v, err:%+v", commitresult, err)
if err != nil {
return common.NewErrorResult(common.ResultCodeInternalError, 0, "", err.Error()).Marshal()
}
height := strconv.FormatInt(commitresult.Height, 10)
code := common.ResultCodeSuccess
var reason string
var result interface{}
waittime, err := strconv.Atoi(config.GetCLIContext().Config.WaitingForQosResult)
if err != nil {
panic("WaitingForQosResult should be able to convert to integer." + err.Error())
}
counter := 0
for {
resultstr, err := fetchResult(cdc, height, commitresult.Hash.String())
log.Printf("fetchResult result:%s, err:%+v\n", resultstr, err)
if err != nil {
log.Printf("fetchResult error:%s\n", err.Error())
reason = err.Error()
code = common.ResultCodeInternalError
break
}
if resultstr != "" && resultstr != (BuyadStub{}).Name() {
log.Printf("fetchResult result:[%+v]\n", resultstr)
rs := []rune(resultstr)
index1 := strings.Index(resultstr, " ")
reason = ""
result = string(rs[index1+1:])
code = string(rs[:index1])
break
}
if counter >= waittime {
log.Println("time out")
reason = "time out"
if resultstr == "" {
code = common.ResultCodeQstarsTimeout
} else {
code = common.ResultCodeQOSTimeout
}
break
}
time.Sleep(500 * time.Millisecond)
counter++
}
if code != common.ResultCodeSuccess {
return common.NewErrorResult(code, commitresult.Height, commitresult.Hash.String(), reason).Marshal()
}
return common.NewSuccessResult(cdc, commitresult.Height, commitresult.Hash.String(), result).Marshal()
}
func fet | identifier_body |
tensor.rs | use std::collections::HashSet;
use std::io::{Read, Seek};
use std::ops::Range;
use std::str::FromStr;
use std::sync::Mutex;
use crate::model::Model;
use tract_hir::internal::*;
#[derive(Debug, Default, Clone)]
pub struct TensorsValues(pub Vec<TensorValues>);
impl TensorsValues {
pub fn by_name(&self, name: &str) -> Option<&TensorValues> {
self.0.iter().find(|t| t.name.as_deref() == Some(name))
}
pub fn by_name_mut(&mut self, name: &str) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.name.as_deref() == Some(name))
}
pub fn by_name_mut_with_default(&mut self, name: &str) -> &mut TensorValues {
if self.by_name_mut(name).is_none() {
self.add(TensorValues { name: Some(name.to_string()), ..TensorValues::default() });
}
self.by_name_mut(name).unwrap()
}
pub fn by_input_ix(&self, ix: usize) -> Option<&TensorValues> {
self.0.iter().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut(&mut self, ix: usize) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut_with_default(&mut self, ix: usize) -> &mut TensorValues {
if self.by_input_ix_mut(ix).is_none() |
self.by_input_ix_mut(ix).unwrap()
}
pub fn add(&mut self, other: TensorValues) {
let mut tensor = other.input_index.and_then(|ix| self.by_input_ix_mut(ix));
if tensor.is_none() {
tensor = other.name.as_deref().and_then(|ix| self.by_name_mut(ix))
}
if let Some(tensor) = tensor {
if tensor.fact.is_none() {
tensor.fact = other.fact;
}
if tensor.values.is_none() {
tensor.values = other.values;
}
} else {
self.0.push(other.clone());
};
}
}
#[derive(Debug, PartialEq, Clone, Default)]
pub struct TensorValues {
pub input_index: Option<usize>,
pub output_index: Option<usize>,
pub name: Option<String>,
pub fact: Option<InferenceFact>,
pub values: Option<Vec<TValue>>,
pub random_range: Option<Range<f32>>,
}
fn parse_dt(dt: &str) -> TractResult<DatumType> {
Ok(match dt.to_lowercase().as_ref() {
"bool" => DatumType::Bool,
"f16" => DatumType::F16,
"f32" => DatumType::F32,
"f64" => DatumType::F64,
"i8" => DatumType::I8,
"i16" => DatumType::I16,
"i32" => DatumType::I32,
"i64" => DatumType::I64,
"u8" => DatumType::U8,
"u16" => DatumType::U16,
"u32" => DatumType::U32,
"u64" => DatumType::U64,
"tdim" => DatumType::TDim,
_ => bail!(
"Type of the input should be f16, f32, f64, i8, i16, i16, i32, u8, u16, u32, u64, TDim."
),
})
}
pub fn parse_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
if size.is_empty() {
return Ok(InferenceFact::default());
}
parse_coma_spec(symbol_table, size)
}
pub fn parse_coma_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
let splits = size.split(',').collect::<Vec<_>>();
if splits.is_empty() {
// Hide '{' in this error message from the formatting machinery in bail macro
let msg = "The <size> argument should be formatted as {size},{...},{type}.";
bail!(msg);
}
let last = splits.last().unwrap();
let (datum_type, shape) = if let Ok(dt) = parse_dt(last) {
(Some(dt), &splits[0..splits.len() - 1])
} else {
(None, &*splits)
};
let shape = ShapeFactoid::closed(
shape
.iter()
.map(|&s| {
Ok(if s == "_" {
GenericFactoid::Any
} else {
GenericFactoid::Only(parse_tdim(symbol_table, s)?)
})
})
.collect::<TractResult<TVec<DimFact>>>()?,
);
if let Some(dt) = datum_type {
Ok(InferenceFact::dt_shape(dt, shape))
} else {
Ok(InferenceFact::shape(shape))
}
}
fn parse_values<T: Datum + FromStr>(shape: &[usize], it: Vec<&str>) -> TractResult<Tensor> {
let values = it
.into_iter()
.map(|v| v.parse::<T>().map_err(|_| format_err!("Failed to parse {}", v)))
.collect::<TractResult<Vec<T>>>()?;
Ok(tract_ndarray::Array::from_shape_vec(shape, values)?.into())
}
fn tensor_for_text_data(
symbol_table: &SymbolTable,
_filename: &str,
mut reader: impl Read,
) -> TractResult<Tensor> {
let mut data = String::new();
reader.read_to_string(&mut data)?;
let mut lines = data.lines();
let proto = parse_spec(symbol_table, lines.next().context("Empty data file")?)?;
let shape = proto.shape.concretize().unwrap();
let values = lines.flat_map(|l| l.split_whitespace()).collect::<Vec<&str>>();
// We know there is at most one streaming dimension, so we can deduce the
// missing value with a simple division.
let product: usize = shape.iter().map(|o| o.to_usize().unwrap_or(1)).product();
let missing = values.len() / product;
let shape: Vec<_> = shape.iter().map(|d| d.to_usize().unwrap_or(missing)).collect();
dispatch_numbers!(parse_values(proto.datum_type.concretize().unwrap())(&*shape, values))
}
/// Parses the `data` command-line argument.
pub fn for_data(
symbol_table: &SymbolTable,
filename: &str,
reader: impl Read + std::io::Seek,
) -> TractResult<(Option<String>, InferenceFact)> {
#[allow(unused_imports)]
use std::convert::TryFrom;
if filename.ends_with(".pb") {
#[cfg(feature = "onnx")]
{
/*
let file =
fs::File::open(filename).with_context(|| format!("Can't open {filename:?}"))?;
*/
let proto = ::tract_onnx::tensor::proto_from_reader(reader)?;
Ok((
Some(proto.name.to_string()).filter(|s| !s.is_empty()),
Tensor::try_from(proto)?.into(),
))
}
#[cfg(not(feature = "onnx"))]
{
panic!("Loading tensor from protobuf requires onnx features");
}
} else if filename.contains(".npz:") {
let mut tokens = filename.split(':');
let (_filename, inner) = (tokens.next().unwrap(), tokens.next().unwrap());
let mut npz = ndarray_npy::NpzReader::new(reader)?;
Ok((None, for_npz(&mut npz, inner)?.into()))
} else {
Ok((None, tensor_for_text_data(symbol_table, filename, reader)?.into()))
}
}
pub fn for_npz(
npz: &mut ndarray_npy::NpzReader<impl Read + Seek>,
name: &str,
) -> TractResult<Tensor> {
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<bool>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
bail!("Can not extract tensor from {}", name);
}
pub fn for_string(
symbol_table: &SymbolTable,
value: &str,
) -> TractResult<(Option<String>, InferenceFact)> {
let (name, value) = if value.contains(':') {
let mut splits = value.split(':');
(Some(splits.next().unwrap().to_string()), splits.next().unwrap())
} else {
(None, value)
};
if value.contains('=') {
let mut split = value.split('=');
let spec = parse_spec(symbol_table, split.next().unwrap())?;
let value = split.next().unwrap().split(',');
let dt =
spec.datum_type.concretize().context("Must specify type when giving tensor value")?;
let shape = spec
.shape
.as_concrete_finite()?
.context("Must specify concrete shape when giving tensor value")?;
let tensor = dispatch_numbers!(parse_values(dt)(&*shape, value.collect()))?;
Ok((name, tensor.into()))
} else {
Ok((name, parse_spec(symbol_table, value)?))
}
}
lazy_static::lazy_static! {
static ref WARNING_ONCE: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn warn_once(msg: String) {
if WARNING_ONCE.lock().unwrap().insert(msg.clone()) {
warn!("{}", msg);
}
}
pub struct RunParams {
pub tensors_values: TensorsValues,
pub allow_random_input: bool,
pub allow_float_casts: bool,
}
pub fn retrieve_or_make_inputs(
tract: &dyn Model,
params: &RunParams,
) -> TractResult<Vec<TVec<TValue>>> {
let mut tmp: TVec<Vec<TValue>> = tvec![];
for (ix, input) in tract.input_outlets().iter().enumerate() {
let name = tract.node_name(input.node);
let fact = tract.outlet_typedfact(*input)?;
if let Some(mut value) = params.tensors_values.by_name(name).and_then(|t| t.values.clone())
{
if !value[0].datum_type().is_quantized()
&& fact.datum_type.is_quantized()
&& value[0].datum_type() == fact.datum_type.unquantized()
{
value = value
.iter()
.map(|v| {
let mut v = v.clone().into_tensor();
unsafe { v.set_datum_type(fact.datum_type) };
v.into()
})
.collect();
}
if TypedFact::from(&*value[0]).compatible_with(&fact) {
info!("Using fixed input for input called {} ({} turn(s))", name, value.len());
tmp.push(value.iter().map(|t| t.clone().into_tensor().into()).collect())
} else if fact.datum_type == f16::datum_type()
&& value[0].datum_type() == f32::datum_type()
&& params.allow_float_casts
{
tmp.push(
value.iter().map(|t| t.cast_to::<f16>().unwrap().into_owned().into()).collect(),
)
} else if value.len() == 1 && tract.properties().contains_key("pulse.delay") {
let value = &value[0];
let input_pulse_axis = tract
.properties()
.get("pulse.input_axes")
.context("Expect pulse.input_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[ix] as usize;
let input_pulse = fact.shape.get(input_pulse_axis).unwrap().to_usize().unwrap();
let input_len = value.shape()[input_pulse_axis];
// how many pulses do we need to push full result out ?
// guess by looking at len and delay of the first output
let output_pulse_axis = tract
.properties()
.get("pulse.output_axes")
.context("Expect pulse.output_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[0] as usize;
let output_fact = tract.outlet_typedfact(tract.output_outlets()[0])?;
let output_pulse =
output_fact.shape.get(output_pulse_axis).unwrap().to_usize().unwrap();
let output_len = input_len * output_pulse / input_pulse;
let output_delay = tract.properties()["pulse.delay"].as_slice::<i64>()?[0] as usize;
let last_frame = output_len + output_delay;
let needed_pulses = last_frame.divceil(output_pulse);
let mut values = vec![];
for ix in 0..needed_pulses {
let mut t =
Tensor::zero_dt(fact.datum_type, fact.shape.as_concrete().unwrap())?;
let start = ix * input_pulse;
let end = (start + input_pulse).min(input_len);
if end > start {
t.assign_slice(0..end - start, value, start..end, input_pulse_axis)?;
}
values.push(t.into());
}
info!(
"Generated {} pulses of shape {:?} for input {}.",
needed_pulses, fact.shape, ix
);
tmp.push(values);
} else {
bail!("For input {}, can not reconcile model input fact {:?} with provided input {:?}", name, fact, value[0]);
};
} else if params.allow_random_input {
let fact = tract.outlet_typedfact(*input)?;
warn_once(format!("Using random input for input called {name:?}: {fact:?}"));
let tv = params
.tensors_values
.by_name(name)
.or_else(|| params.tensors_values.by_input_ix(ix));
tmp.push(vec![crate::tensor::tensor_for_fact(&fact, None, tv)?.into()]);
} else {
bail!("Unmatched tensor {}. Fix the input or use \"--allow-random-input\" if this was intended", name);
}
}
Ok((0..tmp[0].len()).map(|turn| tmp.iter().map(|t| t[turn].clone()).collect()).collect())
}
fn make_inputs(values: &[impl std::borrow::Borrow<TypedFact>]) -> TractResult<TVec<TValue>> {
values.iter().map(|v| tensor_for_fact(v.borrow(), None, None).map(|t| t.into())).collect()
}
pub fn make_inputs_for_model(model: &dyn Model) -> TractResult<TVec<TValue>> {
make_inputs(
&model
.input_outlets()
.iter()
.map(|&t| model.outlet_typedfact(t))
.collect::<TractResult<Vec<TypedFact>>>()?,
)
}
#[allow(unused_variables)]
pub fn tensor_for_fact(
fact: &TypedFact,
streaming_dim: Option<usize>,
tv: Option<&TensorValues>,
) -> TractResult<Tensor> {
if let Some(value) = &fact.konst {
return Ok(value.clone().into_tensor());
}
#[cfg(pulse)]
{
if fact.shape.stream_info().is_some() {
use tract_pulse::fact::StreamFact;
use tract_pulse::internal::stream_symbol;
let s = stream_symbol();
if let Some(dim) = streaming_dim {
let shape = fact
.shape
.iter()
.map(|d| {
d.eval(&SymbolValues::default().with(s, dim as i64)).to_usize().unwrap()
})
.collect::<TVec<_>>();
return Ok(random(&shape, fact.datum_type));
} else {
bail!("random tensor requires a streaming dim")
}
}
}
Ok(random(
fact.shape
.as_concrete()
.with_context(|| format!("Expected concrete shape, found: {fact:?}"))?,
fact.datum_type,
tv,
))
}
/// Generates a random tensor of a given size and type.
pub fn random(sizes: &[usize], datum_type: DatumType, tv: Option<&TensorValues>) -> Tensor {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(21242);
let mut tensor = Tensor::zero::<f32>(sizes).unwrap();
let slice = tensor.as_slice_mut::<f32>().unwrap();
if let Some(range) = tv.and_then(|tv| tv.random_range.as_ref()) {
slice.iter_mut().for_each(|x| *x = rng.gen_range(range.clone()))
} else {
slice.iter_mut().for_each(|x| *x = rng.gen())
};
tensor.cast_to_dt(datum_type).unwrap().into_owned()
}
| {
self.add(TensorValues { input_index: Some(ix), ..TensorValues::default() });
} | conditional_block |
tensor.rs | use std::collections::HashSet;
use std::io::{Read, Seek};
use std::ops::Range;
use std::str::FromStr;
use std::sync::Mutex;
use crate::model::Model;
use tract_hir::internal::*;
#[derive(Debug, Default, Clone)]
pub struct TensorsValues(pub Vec<TensorValues>);
impl TensorsValues {
pub fn by_name(&self, name: &str) -> Option<&TensorValues> {
self.0.iter().find(|t| t.name.as_deref() == Some(name))
}
pub fn | (&mut self, name: &str) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.name.as_deref() == Some(name))
}
pub fn by_name_mut_with_default(&mut self, name: &str) -> &mut TensorValues {
if self.by_name_mut(name).is_none() {
self.add(TensorValues { name: Some(name.to_string()), ..TensorValues::default() });
}
self.by_name_mut(name).unwrap()
}
pub fn by_input_ix(&self, ix: usize) -> Option<&TensorValues> {
self.0.iter().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut(&mut self, ix: usize) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut_with_default(&mut self, ix: usize) -> &mut TensorValues {
if self.by_input_ix_mut(ix).is_none() {
self.add(TensorValues { input_index: Some(ix), ..TensorValues::default() });
}
self.by_input_ix_mut(ix).unwrap()
}
pub fn add(&mut self, other: TensorValues) {
let mut tensor = other.input_index.and_then(|ix| self.by_input_ix_mut(ix));
if tensor.is_none() {
tensor = other.name.as_deref().and_then(|ix| self.by_name_mut(ix))
}
if let Some(tensor) = tensor {
if tensor.fact.is_none() {
tensor.fact = other.fact;
}
if tensor.values.is_none() {
tensor.values = other.values;
}
} else {
self.0.push(other.clone());
};
}
}
#[derive(Debug, PartialEq, Clone, Default)]
pub struct TensorValues {
pub input_index: Option<usize>,
pub output_index: Option<usize>,
pub name: Option<String>,
pub fact: Option<InferenceFact>,
pub values: Option<Vec<TValue>>,
pub random_range: Option<Range<f32>>,
}
fn parse_dt(dt: &str) -> TractResult<DatumType> {
Ok(match dt.to_lowercase().as_ref() {
"bool" => DatumType::Bool,
"f16" => DatumType::F16,
"f32" => DatumType::F32,
"f64" => DatumType::F64,
"i8" => DatumType::I8,
"i16" => DatumType::I16,
"i32" => DatumType::I32,
"i64" => DatumType::I64,
"u8" => DatumType::U8,
"u16" => DatumType::U16,
"u32" => DatumType::U32,
"u64" => DatumType::U64,
"tdim" => DatumType::TDim,
_ => bail!(
"Type of the input should be f16, f32, f64, i8, i16, i16, i32, u8, u16, u32, u64, TDim."
),
})
}
pub fn parse_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
if size.is_empty() {
return Ok(InferenceFact::default());
}
parse_coma_spec(symbol_table, size)
}
pub fn parse_coma_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
let splits = size.split(',').collect::<Vec<_>>();
if splits.is_empty() {
// Hide '{' in this error message from the formatting machinery in bail macro
let msg = "The <size> argument should be formatted as {size},{...},{type}.";
bail!(msg);
}
let last = splits.last().unwrap();
let (datum_type, shape) = if let Ok(dt) = parse_dt(last) {
(Some(dt), &splits[0..splits.len() - 1])
} else {
(None, &*splits)
};
let shape = ShapeFactoid::closed(
shape
.iter()
.map(|&s| {
Ok(if s == "_" {
GenericFactoid::Any
} else {
GenericFactoid::Only(parse_tdim(symbol_table, s)?)
})
})
.collect::<TractResult<TVec<DimFact>>>()?,
);
if let Some(dt) = datum_type {
Ok(InferenceFact::dt_shape(dt, shape))
} else {
Ok(InferenceFact::shape(shape))
}
}
fn parse_values<T: Datum + FromStr>(shape: &[usize], it: Vec<&str>) -> TractResult<Tensor> {
let values = it
.into_iter()
.map(|v| v.parse::<T>().map_err(|_| format_err!("Failed to parse {}", v)))
.collect::<TractResult<Vec<T>>>()?;
Ok(tract_ndarray::Array::from_shape_vec(shape, values)?.into())
}
fn tensor_for_text_data(
symbol_table: &SymbolTable,
_filename: &str,
mut reader: impl Read,
) -> TractResult<Tensor> {
let mut data = String::new();
reader.read_to_string(&mut data)?;
let mut lines = data.lines();
let proto = parse_spec(symbol_table, lines.next().context("Empty data file")?)?;
let shape = proto.shape.concretize().unwrap();
let values = lines.flat_map(|l| l.split_whitespace()).collect::<Vec<&str>>();
// We know there is at most one streaming dimension, so we can deduce the
// missing value with a simple division.
let product: usize = shape.iter().map(|o| o.to_usize().unwrap_or(1)).product();
let missing = values.len() / product;
let shape: Vec<_> = shape.iter().map(|d| d.to_usize().unwrap_or(missing)).collect();
dispatch_numbers!(parse_values(proto.datum_type.concretize().unwrap())(&*shape, values))
}
/// Parses the `data` command-line argument.
pub fn for_data(
symbol_table: &SymbolTable,
filename: &str,
reader: impl Read + std::io::Seek,
) -> TractResult<(Option<String>, InferenceFact)> {
#[allow(unused_imports)]
use std::convert::TryFrom;
if filename.ends_with(".pb") {
#[cfg(feature = "onnx")]
{
/*
let file =
fs::File::open(filename).with_context(|| format!("Can't open {filename:?}"))?;
*/
let proto = ::tract_onnx::tensor::proto_from_reader(reader)?;
Ok((
Some(proto.name.to_string()).filter(|s| !s.is_empty()),
Tensor::try_from(proto)?.into(),
))
}
#[cfg(not(feature = "onnx"))]
{
panic!("Loading tensor from protobuf requires onnx features");
}
} else if filename.contains(".npz:") {
let mut tokens = filename.split(':');
let (_filename, inner) = (tokens.next().unwrap(), tokens.next().unwrap());
let mut npz = ndarray_npy::NpzReader::new(reader)?;
Ok((None, for_npz(&mut npz, inner)?.into()))
} else {
Ok((None, tensor_for_text_data(symbol_table, filename, reader)?.into()))
}
}
pub fn for_npz(
npz: &mut ndarray_npy::NpzReader<impl Read + Seek>,
name: &str,
) -> TractResult<Tensor> {
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<bool>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
bail!("Can not extract tensor from {}", name);
}
pub fn for_string(
symbol_table: &SymbolTable,
value: &str,
) -> TractResult<(Option<String>, InferenceFact)> {
let (name, value) = if value.contains(':') {
let mut splits = value.split(':');
(Some(splits.next().unwrap().to_string()), splits.next().unwrap())
} else {
(None, value)
};
if value.contains('=') {
let mut split = value.split('=');
let spec = parse_spec(symbol_table, split.next().unwrap())?;
let value = split.next().unwrap().split(',');
let dt =
spec.datum_type.concretize().context("Must specify type when giving tensor value")?;
let shape = spec
.shape
.as_concrete_finite()?
.context("Must specify concrete shape when giving tensor value")?;
let tensor = dispatch_numbers!(parse_values(dt)(&*shape, value.collect()))?;
Ok((name, tensor.into()))
} else {
Ok((name, parse_spec(symbol_table, value)?))
}
}
lazy_static::lazy_static! {
static ref WARNING_ONCE: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn warn_once(msg: String) {
if WARNING_ONCE.lock().unwrap().insert(msg.clone()) {
warn!("{}", msg);
}
}
pub struct RunParams {
pub tensors_values: TensorsValues,
pub allow_random_input: bool,
pub allow_float_casts: bool,
}
pub fn retrieve_or_make_inputs(
tract: &dyn Model,
params: &RunParams,
) -> TractResult<Vec<TVec<TValue>>> {
let mut tmp: TVec<Vec<TValue>> = tvec![];
for (ix, input) in tract.input_outlets().iter().enumerate() {
let name = tract.node_name(input.node);
let fact = tract.outlet_typedfact(*input)?;
if let Some(mut value) = params.tensors_values.by_name(name).and_then(|t| t.values.clone())
{
if !value[0].datum_type().is_quantized()
&& fact.datum_type.is_quantized()
&& value[0].datum_type() == fact.datum_type.unquantized()
{
value = value
.iter()
.map(|v| {
let mut v = v.clone().into_tensor();
unsafe { v.set_datum_type(fact.datum_type) };
v.into()
})
.collect();
}
if TypedFact::from(&*value[0]).compatible_with(&fact) {
info!("Using fixed input for input called {} ({} turn(s))", name, value.len());
tmp.push(value.iter().map(|t| t.clone().into_tensor().into()).collect())
} else if fact.datum_type == f16::datum_type()
&& value[0].datum_type() == f32::datum_type()
&& params.allow_float_casts
{
tmp.push(
value.iter().map(|t| t.cast_to::<f16>().unwrap().into_owned().into()).collect(),
)
} else if value.len() == 1 && tract.properties().contains_key("pulse.delay") {
let value = &value[0];
let input_pulse_axis = tract
.properties()
.get("pulse.input_axes")
.context("Expect pulse.input_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[ix] as usize;
let input_pulse = fact.shape.get(input_pulse_axis).unwrap().to_usize().unwrap();
let input_len = value.shape()[input_pulse_axis];
// how many pulses do we need to push full result out ?
// guess by looking at len and delay of the first output
let output_pulse_axis = tract
.properties()
.get("pulse.output_axes")
.context("Expect pulse.output_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[0] as usize;
let output_fact = tract.outlet_typedfact(tract.output_outlets()[0])?;
let output_pulse =
output_fact.shape.get(output_pulse_axis).unwrap().to_usize().unwrap();
let output_len = input_len * output_pulse / input_pulse;
let output_delay = tract.properties()["pulse.delay"].as_slice::<i64>()?[0] as usize;
let last_frame = output_len + output_delay;
let needed_pulses = last_frame.divceil(output_pulse);
let mut values = vec![];
for ix in 0..needed_pulses {
let mut t =
Tensor::zero_dt(fact.datum_type, fact.shape.as_concrete().unwrap())?;
let start = ix * input_pulse;
let end = (start + input_pulse).min(input_len);
if end > start {
t.assign_slice(0..end - start, value, start..end, input_pulse_axis)?;
}
values.push(t.into());
}
info!(
"Generated {} pulses of shape {:?} for input {}.",
needed_pulses, fact.shape, ix
);
tmp.push(values);
} else {
bail!("For input {}, can not reconcile model input fact {:?} with provided input {:?}", name, fact, value[0]);
};
} else if params.allow_random_input {
let fact = tract.outlet_typedfact(*input)?;
warn_once(format!("Using random input for input called {name:?}: {fact:?}"));
let tv = params
.tensors_values
.by_name(name)
.or_else(|| params.tensors_values.by_input_ix(ix));
tmp.push(vec![crate::tensor::tensor_for_fact(&fact, None, tv)?.into()]);
} else {
bail!("Unmatched tensor {}. Fix the input or use \"--allow-random-input\" if this was intended", name);
}
}
Ok((0..tmp[0].len()).map(|turn| tmp.iter().map(|t| t[turn].clone()).collect()).collect())
}
fn make_inputs(values: &[impl std::borrow::Borrow<TypedFact>]) -> TractResult<TVec<TValue>> {
values.iter().map(|v| tensor_for_fact(v.borrow(), None, None).map(|t| t.into())).collect()
}
pub fn make_inputs_for_model(model: &dyn Model) -> TractResult<TVec<TValue>> {
make_inputs(
&model
.input_outlets()
.iter()
.map(|&t| model.outlet_typedfact(t))
.collect::<TractResult<Vec<TypedFact>>>()?,
)
}
#[allow(unused_variables)]
pub fn tensor_for_fact(
fact: &TypedFact,
streaming_dim: Option<usize>,
tv: Option<&TensorValues>,
) -> TractResult<Tensor> {
if let Some(value) = &fact.konst {
return Ok(value.clone().into_tensor());
}
#[cfg(pulse)]
{
if fact.shape.stream_info().is_some() {
use tract_pulse::fact::StreamFact;
use tract_pulse::internal::stream_symbol;
let s = stream_symbol();
if let Some(dim) = streaming_dim {
let shape = fact
.shape
.iter()
.map(|d| {
d.eval(&SymbolValues::default().with(s, dim as i64)).to_usize().unwrap()
})
.collect::<TVec<_>>();
return Ok(random(&shape, fact.datum_type));
} else {
bail!("random tensor requires a streaming dim")
}
}
}
Ok(random(
fact.shape
.as_concrete()
.with_context(|| format!("Expected concrete shape, found: {fact:?}"))?,
fact.datum_type,
tv,
))
}
/// Generates a random tensor of a given size and type.
pub fn random(sizes: &[usize], datum_type: DatumType, tv: Option<&TensorValues>) -> Tensor {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(21242);
let mut tensor = Tensor::zero::<f32>(sizes).unwrap();
let slice = tensor.as_slice_mut::<f32>().unwrap();
if let Some(range) = tv.and_then(|tv| tv.random_range.as_ref()) {
slice.iter_mut().for_each(|x| *x = rng.gen_range(range.clone()))
} else {
slice.iter_mut().for_each(|x| *x = rng.gen())
};
tensor.cast_to_dt(datum_type).unwrap().into_owned()
}
| by_name_mut | identifier_name |
tensor.rs | use std::collections::HashSet;
use std::io::{Read, Seek};
use std::ops::Range;
use std::str::FromStr;
use std::sync::Mutex;
use crate::model::Model;
use tract_hir::internal::*;
#[derive(Debug, Default, Clone)]
pub struct TensorsValues(pub Vec<TensorValues>);
impl TensorsValues {
pub fn by_name(&self, name: &str) -> Option<&TensorValues> {
self.0.iter().find(|t| t.name.as_deref() == Some(name))
}
pub fn by_name_mut(&mut self, name: &str) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.name.as_deref() == Some(name))
}
pub fn by_name_mut_with_default(&mut self, name: &str) -> &mut TensorValues {
if self.by_name_mut(name).is_none() {
self.add(TensorValues { name: Some(name.to_string()), ..TensorValues::default() });
}
self.by_name_mut(name).unwrap()
}
pub fn by_input_ix(&self, ix: usize) -> Option<&TensorValues> {
self.0.iter().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut(&mut self, ix: usize) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut_with_default(&mut self, ix: usize) -> &mut TensorValues {
if self.by_input_ix_mut(ix).is_none() {
self.add(TensorValues { input_index: Some(ix), ..TensorValues::default() });
}
self.by_input_ix_mut(ix).unwrap()
}
pub fn add(&mut self, other: TensorValues) {
let mut tensor = other.input_index.and_then(|ix| self.by_input_ix_mut(ix));
if tensor.is_none() {
tensor = other.name.as_deref().and_then(|ix| self.by_name_mut(ix))
}
if let Some(tensor) = tensor {
if tensor.fact.is_none() {
tensor.fact = other.fact;
}
if tensor.values.is_none() {
tensor.values = other.values;
}
} else {
self.0.push(other.clone());
};
}
}
#[derive(Debug, PartialEq, Clone, Default)]
pub struct TensorValues {
pub input_index: Option<usize>,
pub output_index: Option<usize>,
pub name: Option<String>,
pub fact: Option<InferenceFact>,
pub values: Option<Vec<TValue>>,
pub random_range: Option<Range<f32>>,
}
fn parse_dt(dt: &str) -> TractResult<DatumType> {
Ok(match dt.to_lowercase().as_ref() {
"bool" => DatumType::Bool,
"f16" => DatumType::F16,
"f32" => DatumType::F32,
"f64" => DatumType::F64,
"i8" => DatumType::I8,
"i16" => DatumType::I16,
"i32" => DatumType::I32,
"i64" => DatumType::I64,
"u8" => DatumType::U8,
"u16" => DatumType::U16,
"u32" => DatumType::U32,
"u64" => DatumType::U64,
"tdim" => DatumType::TDim,
_ => bail!(
"Type of the input should be f16, f32, f64, i8, i16, i16, i32, u8, u16, u32, u64, TDim."
),
})
}
pub fn parse_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
if size.is_empty() {
return Ok(InferenceFact::default());
}
parse_coma_spec(symbol_table, size)
}
pub fn parse_coma_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
let splits = size.split(',').collect::<Vec<_>>();
if splits.is_empty() {
// Hide '{' in this error message from the formatting machinery in bail macro
let msg = "The <size> argument should be formatted as {size},{...},{type}.";
bail!(msg);
}
let last = splits.last().unwrap();
let (datum_type, shape) = if let Ok(dt) = parse_dt(last) {
(Some(dt), &splits[0..splits.len() - 1])
} else {
(None, &*splits)
};
let shape = ShapeFactoid::closed(
shape
.iter()
.map(|&s| {
Ok(if s == "_" {
GenericFactoid::Any
} else {
GenericFactoid::Only(parse_tdim(symbol_table, s)?)
})
})
.collect::<TractResult<TVec<DimFact>>>()?,
);
if let Some(dt) = datum_type {
Ok(InferenceFact::dt_shape(dt, shape))
} else {
Ok(InferenceFact::shape(shape))
}
}
fn parse_values<T: Datum + FromStr>(shape: &[usize], it: Vec<&str>) -> TractResult<Tensor> {
let values = it
.into_iter()
.map(|v| v.parse::<T>().map_err(|_| format_err!("Failed to parse {}", v)))
.collect::<TractResult<Vec<T>>>()?;
Ok(tract_ndarray::Array::from_shape_vec(shape, values)?.into())
}
fn tensor_for_text_data(
symbol_table: &SymbolTable,
_filename: &str,
mut reader: impl Read,
) -> TractResult<Tensor> {
let mut data = String::new();
reader.read_to_string(&mut data)?;
let mut lines = data.lines();
let proto = parse_spec(symbol_table, lines.next().context("Empty data file")?)?;
let shape = proto.shape.concretize().unwrap();
let values = lines.flat_map(|l| l.split_whitespace()).collect::<Vec<&str>>();
// We know there is at most one streaming dimension, so we can deduce the
// missing value with a simple division.
let product: usize = shape.iter().map(|o| o.to_usize().unwrap_or(1)).product();
let missing = values.len() / product;
let shape: Vec<_> = shape.iter().map(|d| d.to_usize().unwrap_or(missing)).collect();
dispatch_numbers!(parse_values(proto.datum_type.concretize().unwrap())(&*shape, values))
}
/// Parses the `data` command-line argument.
pub fn for_data(
symbol_table: &SymbolTable,
filename: &str,
reader: impl Read + std::io::Seek,
) -> TractResult<(Option<String>, InferenceFact)> {
#[allow(unused_imports)]
use std::convert::TryFrom;
if filename.ends_with(".pb") {
#[cfg(feature = "onnx")]
{
/*
let file =
fs::File::open(filename).with_context(|| format!("Can't open {filename:?}"))?;
*/
let proto = ::tract_onnx::tensor::proto_from_reader(reader)?;
Ok((
Some(proto.name.to_string()).filter(|s| !s.is_empty()),
Tensor::try_from(proto)?.into(),
))
}
#[cfg(not(feature = "onnx"))]
{
panic!("Loading tensor from protobuf requires onnx features");
}
} else if filename.contains(".npz:") {
let mut tokens = filename.split(':');
let (_filename, inner) = (tokens.next().unwrap(), tokens.next().unwrap());
let mut npz = ndarray_npy::NpzReader::new(reader)?;
Ok((None, for_npz(&mut npz, inner)?.into()))
} else {
Ok((None, tensor_for_text_data(symbol_table, filename, reader)?.into()))
}
}
pub fn for_npz(
npz: &mut ndarray_npy::NpzReader<impl Read + Seek>,
name: &str,
) -> TractResult<Tensor> {
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<bool>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
bail!("Can not extract tensor from {}", name);
}
pub fn for_string(
symbol_table: &SymbolTable,
value: &str,
) -> TractResult<(Option<String>, InferenceFact)> {
let (name, value) = if value.contains(':') {
let mut splits = value.split(':');
(Some(splits.next().unwrap().to_string()), splits.next().unwrap())
} else {
(None, value)
};
if value.contains('=') {
let mut split = value.split('=');
let spec = parse_spec(symbol_table, split.next().unwrap())?;
let value = split.next().unwrap().split(',');
let dt =
spec.datum_type.concretize().context("Must specify type when giving tensor value")?;
let shape = spec
.shape
.as_concrete_finite()?
.context("Must specify concrete shape when giving tensor value")?;
let tensor = dispatch_numbers!(parse_values(dt)(&*shape, value.collect()))?;
Ok((name, tensor.into()))
} else {
Ok((name, parse_spec(symbol_table, value)?))
}
}
lazy_static::lazy_static! {
static ref WARNING_ONCE: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn warn_once(msg: String) {
if WARNING_ONCE.lock().unwrap().insert(msg.clone()) {
warn!("{}", msg);
}
}
pub struct RunParams {
pub tensors_values: TensorsValues,
pub allow_random_input: bool,
pub allow_float_casts: bool,
}
pub fn retrieve_or_make_inputs(
tract: &dyn Model,
params: &RunParams,
) -> TractResult<Vec<TVec<TValue>>> {
let mut tmp: TVec<Vec<TValue>> = tvec![];
for (ix, input) in tract.input_outlets().iter().enumerate() {
let name = tract.node_name(input.node);
let fact = tract.outlet_typedfact(*input)?;
if let Some(mut value) = params.tensors_values.by_name(name).and_then(|t| t.values.clone())
{
if !value[0].datum_type().is_quantized()
&& fact.datum_type.is_quantized()
&& value[0].datum_type() == fact.datum_type.unquantized()
{
value = value
.iter()
.map(|v| {
let mut v = v.clone().into_tensor();
unsafe { v.set_datum_type(fact.datum_type) };
v.into()
})
.collect();
}
if TypedFact::from(&*value[0]).compatible_with(&fact) {
info!("Using fixed input for input called {} ({} turn(s))", name, value.len());
tmp.push(value.iter().map(|t| t.clone().into_tensor().into()).collect())
} else if fact.datum_type == f16::datum_type()
&& value[0].datum_type() == f32::datum_type()
&& params.allow_float_casts
{
tmp.push(
value.iter().map(|t| t.cast_to::<f16>().unwrap().into_owned().into()).collect(),
)
} else if value.len() == 1 && tract.properties().contains_key("pulse.delay") {
let value = &value[0];
let input_pulse_axis = tract
.properties()
.get("pulse.input_axes")
.context("Expect pulse.input_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[ix] as usize;
let input_pulse = fact.shape.get(input_pulse_axis).unwrap().to_usize().unwrap();
let input_len = value.shape()[input_pulse_axis];
// how many pulses do we need to push full result out ?
// guess by looking at len and delay of the first output
let output_pulse_axis = tract
.properties()
.get("pulse.output_axes")
.context("Expect pulse.output_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[0] as usize;
let output_fact = tract.outlet_typedfact(tract.output_outlets()[0])?;
let output_pulse =
output_fact.shape.get(output_pulse_axis).unwrap().to_usize().unwrap();
let output_len = input_len * output_pulse / input_pulse;
let output_delay = tract.properties()["pulse.delay"].as_slice::<i64>()?[0] as usize;
let last_frame = output_len + output_delay;
let needed_pulses = last_frame.divceil(output_pulse);
let mut values = vec![];
for ix in 0..needed_pulses {
let mut t =
Tensor::zero_dt(fact.datum_type, fact.shape.as_concrete().unwrap())?;
let start = ix * input_pulse;
let end = (start + input_pulse).min(input_len);
if end > start {
t.assign_slice(0..end - start, value, start..end, input_pulse_axis)?;
}
values.push(t.into());
}
info!(
"Generated {} pulses of shape {:?} for input {}.",
needed_pulses, fact.shape, ix
);
tmp.push(values);
} else {
bail!("For input {}, can not reconcile model input fact {:?} with provided input {:?}", name, fact, value[0]);
};
} else if params.allow_random_input {
let fact = tract.outlet_typedfact(*input)?;
warn_once(format!("Using random input for input called {name:?}: {fact:?}"));
let tv = params
.tensors_values
.by_name(name)
.or_else(|| params.tensors_values.by_input_ix(ix));
tmp.push(vec![crate::tensor::tensor_for_fact(&fact, None, tv)?.into()]);
} else {
bail!("Unmatched tensor {}. Fix the input or use \"--allow-random-input\" if this was intended", name);
} | values.iter().map(|v| tensor_for_fact(v.borrow(), None, None).map(|t| t.into())).collect()
}
pub fn make_inputs_for_model(model: &dyn Model) -> TractResult<TVec<TValue>> {
make_inputs(
&model
.input_outlets()
.iter()
.map(|&t| model.outlet_typedfact(t))
.collect::<TractResult<Vec<TypedFact>>>()?,
)
}
#[allow(unused_variables)]
pub fn tensor_for_fact(
fact: &TypedFact,
streaming_dim: Option<usize>,
tv: Option<&TensorValues>,
) -> TractResult<Tensor> {
if let Some(value) = &fact.konst {
return Ok(value.clone().into_tensor());
}
#[cfg(pulse)]
{
if fact.shape.stream_info().is_some() {
use tract_pulse::fact::StreamFact;
use tract_pulse::internal::stream_symbol;
let s = stream_symbol();
if let Some(dim) = streaming_dim {
let shape = fact
.shape
.iter()
.map(|d| {
d.eval(&SymbolValues::default().with(s, dim as i64)).to_usize().unwrap()
})
.collect::<TVec<_>>();
return Ok(random(&shape, fact.datum_type));
} else {
bail!("random tensor requires a streaming dim")
}
}
}
Ok(random(
fact.shape
.as_concrete()
.with_context(|| format!("Expected concrete shape, found: {fact:?}"))?,
fact.datum_type,
tv,
))
}
/// Generates a random tensor of a given size and type.
pub fn random(sizes: &[usize], datum_type: DatumType, tv: Option<&TensorValues>) -> Tensor {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(21242);
let mut tensor = Tensor::zero::<f32>(sizes).unwrap();
let slice = tensor.as_slice_mut::<f32>().unwrap();
if let Some(range) = tv.and_then(|tv| tv.random_range.as_ref()) {
slice.iter_mut().for_each(|x| *x = rng.gen_range(range.clone()))
} else {
slice.iter_mut().for_each(|x| *x = rng.gen())
};
tensor.cast_to_dt(datum_type).unwrap().into_owned()
} | }
Ok((0..tmp[0].len()).map(|turn| tmp.iter().map(|t| t[turn].clone()).collect()).collect())
}
fn make_inputs(values: &[impl std::borrow::Borrow<TypedFact>]) -> TractResult<TVec<TValue>> { | random_line_split |
tensor.rs | use std::collections::HashSet;
use std::io::{Read, Seek};
use std::ops::Range;
use std::str::FromStr;
use std::sync::Mutex;
use crate::model::Model;
use tract_hir::internal::*;
#[derive(Debug, Default, Clone)]
pub struct TensorsValues(pub Vec<TensorValues>);
impl TensorsValues {
pub fn by_name(&self, name: &str) -> Option<&TensorValues> {
self.0.iter().find(|t| t.name.as_deref() == Some(name))
}
pub fn by_name_mut(&mut self, name: &str) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.name.as_deref() == Some(name))
}
pub fn by_name_mut_with_default(&mut self, name: &str) -> &mut TensorValues {
if self.by_name_mut(name).is_none() {
self.add(TensorValues { name: Some(name.to_string()), ..TensorValues::default() });
}
self.by_name_mut(name).unwrap()
}
pub fn by_input_ix(&self, ix: usize) -> Option<&TensorValues> {
self.0.iter().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut(&mut self, ix: usize) -> Option<&mut TensorValues> {
self.0.iter_mut().find(|t| t.input_index == Some(ix))
}
pub fn by_input_ix_mut_with_default(&mut self, ix: usize) -> &mut TensorValues {
if self.by_input_ix_mut(ix).is_none() {
self.add(TensorValues { input_index: Some(ix), ..TensorValues::default() });
}
self.by_input_ix_mut(ix).unwrap()
}
pub fn add(&mut self, other: TensorValues) {
let mut tensor = other.input_index.and_then(|ix| self.by_input_ix_mut(ix));
if tensor.is_none() {
tensor = other.name.as_deref().and_then(|ix| self.by_name_mut(ix))
}
if let Some(tensor) = tensor {
if tensor.fact.is_none() {
tensor.fact = other.fact;
}
if tensor.values.is_none() {
tensor.values = other.values;
}
} else {
self.0.push(other.clone());
};
}
}
#[derive(Debug, PartialEq, Clone, Default)]
pub struct TensorValues {
pub input_index: Option<usize>,
pub output_index: Option<usize>,
pub name: Option<String>,
pub fact: Option<InferenceFact>,
pub values: Option<Vec<TValue>>,
pub random_range: Option<Range<f32>>,
}
fn parse_dt(dt: &str) -> TractResult<DatumType> {
Ok(match dt.to_lowercase().as_ref() {
"bool" => DatumType::Bool,
"f16" => DatumType::F16,
"f32" => DatumType::F32,
"f64" => DatumType::F64,
"i8" => DatumType::I8,
"i16" => DatumType::I16,
"i32" => DatumType::I32,
"i64" => DatumType::I64,
"u8" => DatumType::U8,
"u16" => DatumType::U16,
"u32" => DatumType::U32,
"u64" => DatumType::U64,
"tdim" => DatumType::TDim,
_ => bail!(
"Type of the input should be f16, f32, f64, i8, i16, i16, i32, u8, u16, u32, u64, TDim."
),
})
}
pub fn parse_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
if size.is_empty() {
return Ok(InferenceFact::default());
}
parse_coma_spec(symbol_table, size)
}
pub fn parse_coma_spec(symbol_table: &SymbolTable, size: &str) -> TractResult<InferenceFact> {
let splits = size.split(',').collect::<Vec<_>>();
if splits.is_empty() {
// Hide '{' in this error message from the formatting machinery in bail macro
let msg = "The <size> argument should be formatted as {size},{...},{type}.";
bail!(msg);
}
let last = splits.last().unwrap();
let (datum_type, shape) = if let Ok(dt) = parse_dt(last) {
(Some(dt), &splits[0..splits.len() - 1])
} else {
(None, &*splits)
};
let shape = ShapeFactoid::closed(
shape
.iter()
.map(|&s| {
Ok(if s == "_" {
GenericFactoid::Any
} else {
GenericFactoid::Only(parse_tdim(symbol_table, s)?)
})
})
.collect::<TractResult<TVec<DimFact>>>()?,
);
if let Some(dt) = datum_type {
Ok(InferenceFact::dt_shape(dt, shape))
} else {
Ok(InferenceFact::shape(shape))
}
}
fn parse_values<T: Datum + FromStr>(shape: &[usize], it: Vec<&str>) -> TractResult<Tensor> {
let values = it
.into_iter()
.map(|v| v.parse::<T>().map_err(|_| format_err!("Failed to parse {}", v)))
.collect::<TractResult<Vec<T>>>()?;
Ok(tract_ndarray::Array::from_shape_vec(shape, values)?.into())
}
fn tensor_for_text_data(
symbol_table: &SymbolTable,
_filename: &str,
mut reader: impl Read,
) -> TractResult<Tensor> {
let mut data = String::new();
reader.read_to_string(&mut data)?;
let mut lines = data.lines();
let proto = parse_spec(symbol_table, lines.next().context("Empty data file")?)?;
let shape = proto.shape.concretize().unwrap();
let values = lines.flat_map(|l| l.split_whitespace()).collect::<Vec<&str>>();
// We know there is at most one streaming dimension, so we can deduce the
// missing value with a simple division.
let product: usize = shape.iter().map(|o| o.to_usize().unwrap_or(1)).product();
let missing = values.len() / product;
let shape: Vec<_> = shape.iter().map(|d| d.to_usize().unwrap_or(missing)).collect();
dispatch_numbers!(parse_values(proto.datum_type.concretize().unwrap())(&*shape, values))
}
/// Parses the `data` command-line argument.
pub fn for_data(
symbol_table: &SymbolTable,
filename: &str,
reader: impl Read + std::io::Seek,
) -> TractResult<(Option<String>, InferenceFact)> {
#[allow(unused_imports)]
use std::convert::TryFrom;
if filename.ends_with(".pb") {
#[cfg(feature = "onnx")]
{
/*
let file =
fs::File::open(filename).with_context(|| format!("Can't open {filename:?}"))?;
*/
let proto = ::tract_onnx::tensor::proto_from_reader(reader)?;
Ok((
Some(proto.name.to_string()).filter(|s| !s.is_empty()),
Tensor::try_from(proto)?.into(),
))
}
#[cfg(not(feature = "onnx"))]
{
panic!("Loading tensor from protobuf requires onnx features");
}
} else if filename.contains(".npz:") {
let mut tokens = filename.split(':');
let (_filename, inner) = (tokens.next().unwrap(), tokens.next().unwrap());
let mut npz = ndarray_npy::NpzReader::new(reader)?;
Ok((None, for_npz(&mut npz, inner)?.into()))
} else {
Ok((None, tensor_for_text_data(symbol_table, filename, reader)?.into()))
}
}
pub fn for_npz(
npz: &mut ndarray_npy::NpzReader<impl Read + Seek>,
name: &str,
) -> TractResult<Tensor> {
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u8>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u16>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u32>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u64>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<bool>, tract_ndarray::IxDyn>(name) {
return Ok(t.into_tensor());
}
bail!("Can not extract tensor from {}", name);
}
pub fn for_string(
symbol_table: &SymbolTable,
value: &str,
) -> TractResult<(Option<String>, InferenceFact)> {
let (name, value) = if value.contains(':') {
let mut splits = value.split(':');
(Some(splits.next().unwrap().to_string()), splits.next().unwrap())
} else {
(None, value)
};
if value.contains('=') {
let mut split = value.split('=');
let spec = parse_spec(symbol_table, split.next().unwrap())?;
let value = split.next().unwrap().split(',');
let dt =
spec.datum_type.concretize().context("Must specify type when giving tensor value")?;
let shape = spec
.shape
.as_concrete_finite()?
.context("Must specify concrete shape when giving tensor value")?;
let tensor = dispatch_numbers!(parse_values(dt)(&*shape, value.collect()))?;
Ok((name, tensor.into()))
} else {
Ok((name, parse_spec(symbol_table, value)?))
}
}
lazy_static::lazy_static! {
static ref WARNING_ONCE: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn warn_once(msg: String) {
if WARNING_ONCE.lock().unwrap().insert(msg.clone()) {
warn!("{}", msg);
}
}
pub struct RunParams {
pub tensors_values: TensorsValues,
pub allow_random_input: bool,
pub allow_float_casts: bool,
}
pub fn retrieve_or_make_inputs(
tract: &dyn Model,
params: &RunParams,
) -> TractResult<Vec<TVec<TValue>>> {
let mut tmp: TVec<Vec<TValue>> = tvec![];
for (ix, input) in tract.input_outlets().iter().enumerate() {
let name = tract.node_name(input.node);
let fact = tract.outlet_typedfact(*input)?;
if let Some(mut value) = params.tensors_values.by_name(name).and_then(|t| t.values.clone())
{
if !value[0].datum_type().is_quantized()
&& fact.datum_type.is_quantized()
&& value[0].datum_type() == fact.datum_type.unquantized()
{
value = value
.iter()
.map(|v| {
let mut v = v.clone().into_tensor();
unsafe { v.set_datum_type(fact.datum_type) };
v.into()
})
.collect();
}
if TypedFact::from(&*value[0]).compatible_with(&fact) {
info!("Using fixed input for input called {} ({} turn(s))", name, value.len());
tmp.push(value.iter().map(|t| t.clone().into_tensor().into()).collect())
} else if fact.datum_type == f16::datum_type()
&& value[0].datum_type() == f32::datum_type()
&& params.allow_float_casts
{
tmp.push(
value.iter().map(|t| t.cast_to::<f16>().unwrap().into_owned().into()).collect(),
)
} else if value.len() == 1 && tract.properties().contains_key("pulse.delay") {
let value = &value[0];
let input_pulse_axis = tract
.properties()
.get("pulse.input_axes")
.context("Expect pulse.input_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[ix] as usize;
let input_pulse = fact.shape.get(input_pulse_axis).unwrap().to_usize().unwrap();
let input_len = value.shape()[input_pulse_axis];
// how many pulses do we need to push full result out ?
// guess by looking at len and delay of the first output
let output_pulse_axis = tract
.properties()
.get("pulse.output_axes")
.context("Expect pulse.output_axes property")?
.cast_to::<i64>()?
.as_slice::<i64>()?[0] as usize;
let output_fact = tract.outlet_typedfact(tract.output_outlets()[0])?;
let output_pulse =
output_fact.shape.get(output_pulse_axis).unwrap().to_usize().unwrap();
let output_len = input_len * output_pulse / input_pulse;
let output_delay = tract.properties()["pulse.delay"].as_slice::<i64>()?[0] as usize;
let last_frame = output_len + output_delay;
let needed_pulses = last_frame.divceil(output_pulse);
let mut values = vec![];
for ix in 0..needed_pulses {
let mut t =
Tensor::zero_dt(fact.datum_type, fact.shape.as_concrete().unwrap())?;
let start = ix * input_pulse;
let end = (start + input_pulse).min(input_len);
if end > start {
t.assign_slice(0..end - start, value, start..end, input_pulse_axis)?;
}
values.push(t.into());
}
info!(
"Generated {} pulses of shape {:?} for input {}.",
needed_pulses, fact.shape, ix
);
tmp.push(values);
} else {
bail!("For input {}, can not reconcile model input fact {:?} with provided input {:?}", name, fact, value[0]);
};
} else if params.allow_random_input {
let fact = tract.outlet_typedfact(*input)?;
warn_once(format!("Using random input for input called {name:?}: {fact:?}"));
let tv = params
.tensors_values
.by_name(name)
.or_else(|| params.tensors_values.by_input_ix(ix));
tmp.push(vec![crate::tensor::tensor_for_fact(&fact, None, tv)?.into()]);
} else {
bail!("Unmatched tensor {}. Fix the input or use \"--allow-random-input\" if this was intended", name);
}
}
Ok((0..tmp[0].len()).map(|turn| tmp.iter().map(|t| t[turn].clone()).collect()).collect())
}
fn make_inputs(values: &[impl std::borrow::Borrow<TypedFact>]) -> TractResult<TVec<TValue>> {
values.iter().map(|v| tensor_for_fact(v.borrow(), None, None).map(|t| t.into())).collect()
}
pub fn make_inputs_for_model(model: &dyn Model) -> TractResult<TVec<TValue>> |
#[allow(unused_variables)]
pub fn tensor_for_fact(
fact: &TypedFact,
streaming_dim: Option<usize>,
tv: Option<&TensorValues>,
) -> TractResult<Tensor> {
if let Some(value) = &fact.konst {
return Ok(value.clone().into_tensor());
}
#[cfg(pulse)]
{
if fact.shape.stream_info().is_some() {
use tract_pulse::fact::StreamFact;
use tract_pulse::internal::stream_symbol;
let s = stream_symbol();
if let Some(dim) = streaming_dim {
let shape = fact
.shape
.iter()
.map(|d| {
d.eval(&SymbolValues::default().with(s, dim as i64)).to_usize().unwrap()
})
.collect::<TVec<_>>();
return Ok(random(&shape, fact.datum_type));
} else {
bail!("random tensor requires a streaming dim")
}
}
}
Ok(random(
fact.shape
.as_concrete()
.with_context(|| format!("Expected concrete shape, found: {fact:?}"))?,
fact.datum_type,
tv,
))
}
/// Generates a random tensor of a given size and type.
pub fn random(sizes: &[usize], datum_type: DatumType, tv: Option<&TensorValues>) -> Tensor {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(21242);
let mut tensor = Tensor::zero::<f32>(sizes).unwrap();
let slice = tensor.as_slice_mut::<f32>().unwrap();
if let Some(range) = tv.and_then(|tv| tv.random_range.as_ref()) {
slice.iter_mut().for_each(|x| *x = rng.gen_range(range.clone()))
} else {
slice.iter_mut().for_each(|x| *x = rng.gen())
};
tensor.cast_to_dt(datum_type).unwrap().into_owned()
}
| {
make_inputs(
&model
.input_outlets()
.iter()
.map(|&t| model.outlet_typedfact(t))
.collect::<TractResult<Vec<TypedFact>>>()?,
)
} | identifier_body |
setup.py | from numpy.distutils.core import setup, Extension
import distutils.sysconfig
import sys
import os
import os.path
import re
# Get BUILDTYPE for checking if this is intel-mac
buildtype = os.getenv("BUILDTYPE")
if buildtype:
buildtype = buildtype.strip()
if not buildtype:
raise ValueError("Environment variable BUILDTYPE is not defined")
# (Non-standard) Directories containing .h include files
incdir_list = [ "pyfermod",
os.path.join("fer", "common"),
os.path.join("fmt", "cmn"),
os.path.join("fer", "ef_utility"),
os.path.join("fer", "grdel"), ]
bind_and_hide_internal = os.getenv("BIND_AND_HIDE_INTERNAL")
if bind_and_hide_internal:
bind_and_hide_internal = bind_and_hide_internal.strip()
# NETCDF_LIBDIR must be given, either for the static library or the shared-object library
netcdf_libdir = os.getenv("NETCDF_LIBDIR")
if netcdf_libdir:
netcdf_libdir = netcdf_libdir.strip()
if not netcdf_libdir:
raise ValueError("Environment variable NETCDF_LIBDIR is not defined") |
# HDF5_LIBDIR is only given if the HDF5 and NetCDF libraries are to be statically linked
hdf5_libdir = os.getenv("HDF5_LIBDIR")
if hdf5_libdir:
hdf5_libdir = hdf5_libdir.strip()
# SZ_LIBDIR is the location of the SZ library to be linked in
sz_libdir = os.getenv("SZ_LIBDIR")
if sz_libdir:
sz_libdir = sz_libdir.strip()
# CAIRO_LIBDIR is only given if the cairo library is to be statically linked in
cairo_libdir = os.getenv("CAIRO_LIBDIR")
if cairo_libdir:
cairo_libdir = cairo_libdir.strip()
# PIXMAN_LIBDIR is only given if the pixman-1 library is to be statically linked in
pixman_libdir = os.getenv("PIXMAN_LIBDIR")
if pixman_libdir:
pixman_libdir = pixman_libdir.strip()
# PANGO_LIBDIR gives a non-standard location of the pango libraries
pango_libdir = os.getenv("PANGO_LIBDIR")
if pango_libdir:
pango_libdir = pango_libdir.strip()
# GFORTRAN_LIB gives a non-standard full-path location of the gfortran library to be used
# in the linking step. If not given or empty, the -lgfortran flag is used in the linking step.
gfortran_lib = os.getenv("GFORTRAN_LIB")
if gfortran_lib:
gfortran_lib = gfortran_lib.strip()
# The location of libpythonx.x.so, in case it is not in a standard location
python_libdir = os.path.split( distutils.sysconfig.get_python_lib(standard_lib=True) )[0]
# The list of additional directories to examine for libraries
libdir_list = [ "lib", netcdf_libdir, ]
if hdf5_libdir:
libdir_list.append(hdf5_libdir)
if sz_libdir:
libdir_list.append(sz_libdir)
if cairo_libdir:
libdir_list.append(cairo_libdir)
if pixman_libdir:
libdir_list.append(pixman_libdir)
if pango_libdir:
libdir_list.append(pango_libdir)
libdir_list.append(python_libdir)
# Get the list of ferret static libraries
# Stripping off the "lib" prefix and the ".a" suffix
fer_lib_list = [ ]
for libname in os.listdir("lib"):
if (libname[:3] == "lib") and (libname[-2:] == ".a"):
fer_lib_list.append(libname[3:-2])
# Create the list of libraries to link
lib_list = fer_lib_list[:]
if buildtype != "intel-mac":
# fer_lib_list is included multiple times to resolve interdependencies
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
# Linking in the rest of the system libraries were moved to addn_link_flags
# in order to make sure the appropriate netcdff, netcdf, hdf5_hl, hdf5, and
# cairo libraries are used.
addn_link_args = [ ]
# Link to the appropriate netcdf libraries.
# The hdf5 libraries are only used to resolve netcdf library function
# calls when statically linking in the netcdf libraries.
if hdf5_libdir:
netcdff_lib = os.path.join(netcdf_libdir, "libnetcdff.a")
addn_link_args.append(netcdff_lib)
netcdf_lib = os.path.join(netcdf_libdir, "libnetcdf.a")
addn_link_args.append(netcdf_lib)
hdf5_hl_lib = os.path.join(hdf5_libdir, "libhdf5_hl.a")
addn_link_args.append(hdf5_hl_lib)
hdf5_lib = os.path.join(hdf5_libdir, "libhdf5.a")
addn_link_args.append(hdf5_lib)
else:
addn_link_args.extend([ "-lnetcdff", "-lnetcdf" ])
# Link to the cairo library and the libraries it requires.
if cairo_libdir:
cairo_lib = os.path.join(cairo_libdir, "libcairo.a")
addn_link_args.append(cairo_lib);
if pixman_libdir:
pixman_lib = os.path.join(pixman_libdir, "libpixman-1.a")
else:
pixman_lib = "-lpixman-1"
addn_link_args.extend([ pixman_lib, "-lfreetype", "-lfontconfig", "-lpng" ])
else:
addn_link_args.append("-lcairo")
# The Pango-Cairo text-rendering libraries
addn_link_args.append("-lpangocairo-1.0")
# Link in the appropriate system libraries
if hdf5_libdir:
addn_link_args.append("-lcurl")
if sz_libdir:
addn_link_args.append("-lsz")
if gfortran_lib:
addn_link_args.append(gfortran_lib)
else:
addn_link_args.append("-lgfortran")
addn_link_args.extend([ "-lz", "-ldl", "-lm", "-fPIC", ])
if bind_and_hide_internal:
# Bind symbols and function symbols to any internal definitions
# and do not make any of the symbols or function symbols defined
# in any libraries externally visible (mainly for cairo and pixman).
# Those in the object files (including those from pyfermod and
# fer/ef_utility) will still be visible.
addn_link_args.extend([ "-Wl,-Bsymbolic", "-Wl,--exclude-libs,ALL"])
if os.uname()[0] == 'Darwin':
# For Mac OSX, leave room for library path renames
addn_link_args.append("-Wl,-headerpad_max_install_names")
# Get the list of C source files in pyfermod
src_list = [ ]
for srcname in os.listdir("pyfermod"):
if srcname[-2:] == ".c":
src_list.append(os.path.join("pyfermod", srcname))
# Get the list of additional objects to be linked in
# edited to remove reference to long-disused giu-fakes.o
addnobjs_list = [ ]
dirname = os.path.join("fer", "ef_utility")
for srcname in os.listdir(dirname):
if srcname[-2:] == ".o":
addnobjs_list.append(os.path.join(dirname, srcname))
dirname = os.path.join("fer", "special")
for srcname in ( "FerMem_routines.o", "fakes3.o", "ferret_dispatch.o", "linux_routines.o", ):
addnobjs_list.append(os.path.join(dirname, srcname))
for srcname in os.listdir(dirname):
if (srcname[0] == 'x') and (srcname[-7:] == "_data.o"):
addnobjs_list.append(os.path.join(dirname, srcname))
if bind_and_hide_internal:
# Duplicate objects in libraries to make them externally visible (e.g., for las
# external functions) if the '--exclude-libs ALL' flag was passed to the linker.
dirname = os.path.join("fmt", "src")
addnobjs_list.append(os.path.join(dirname, "tm_lenstr.o"));
addnobjs_list.append(os.path.join(dirname, "tm_fmt.o"));
addnobjs_list.append(os.path.join(dirname, "tm_lefint.o"));
# Create the pyferret.libpyferret Extension
ext_mods = [ Extension("pyferret.libpyferret", include_dirs = incdir_list,
sources = src_list,
extra_objects = addnobjs_list,
library_dirs = libdir_list,
libraries = lib_list,
extra_link_args = addn_link_args), ]
pyferret_version = None
xrev_name = os.path.join("fer", "dat", "xrevision_data.F")
xrev_file = open(xrev_name)
try:
pat = re.compile('\\s+DATA\\s+revision_level\\s*/\\s*(\\S+)\\s*/\\s*', flags=re.IGNORECASE)
for datlin in xrev_file:
mat = re.match(pat, datlin)
if mat:
pyferret_version = mat.group(1)
break
finally:
xrev_file.close()
if not pyferret_version:
raise ValueError('Unable to find the version number in ' + xrev_name)
# Configure the setup
setup(name = "pyferret",
version = pyferret_version,
description = "Python module providing Ferret functionality",
long_description = "Python module providing Ferret functionality",
author = "Karl M. Smith",
author_email = "karl.smith@noaa.gov",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "numpy", ],
packages = [ "pyferret", "pyferret.eofanal", "pyferret.fershp",
"pyferret.graphbind", "pyferret.regrid", "pyferret.stats", ],
package_dir = { "pyferret":"pyfermod", },
ext_modules = ext_mods)
setup(name = "pipedviewer",
version = pyferret_version,
description = "Graphics viewer controlled by a command pipe",
long_description = "A graphics viewer application that receives its " \
"drawing and other commands primarily from another " \
"application through a pipe. A limited number of " \
"commands are provided by the viewer itself to allow " \
"saving and some manipulation of the displayed scene. " \
"The controlling application, however, will be unaware " \
"of these modifications made to the scene.",
author = "Karl M. Smith",
author_email = "karl.smith@noaa.gov",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "multiprocessing", ],
packages = [ "pipedviewer", ],
package_dir = { "pipedviewer":"pviewmod", })
setup(name = "gcircle",
version = pyferret_version,
description = "Module of functions involving great circles with " \
"points given in longitudes and latitudes (thus " \
"assuming a spheroid model of the earth).",
long_description = "Module of functions involving great circles with " \
"points given in longitudes and latitudes (thus " \
"assuming a spheroid model of the earth).",
author = "Karl M. Smith",
author_email = "karl.smith@noaa.gov",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "numpy", ],
py_modules = [ "gcircle", ]) | random_line_split | |
setup.py | from numpy.distutils.core import setup, Extension
import distutils.sysconfig
import sys
import os
import os.path
import re
# Get BUILDTYPE for checking if this is intel-mac
buildtype = os.getenv("BUILDTYPE")
if buildtype:
buildtype = buildtype.strip()
if not buildtype:
raise ValueError("Environment variable BUILDTYPE is not defined")
# (Non-standard) Directories containing .h include files
incdir_list = [ "pyfermod",
os.path.join("fer", "common"),
os.path.join("fmt", "cmn"),
os.path.join("fer", "ef_utility"),
os.path.join("fer", "grdel"), ]
bind_and_hide_internal = os.getenv("BIND_AND_HIDE_INTERNAL")
if bind_and_hide_internal:
bind_and_hide_internal = bind_and_hide_internal.strip()
# NETCDF_LIBDIR must be given, either for the static library or the shared-object library
netcdf_libdir = os.getenv("NETCDF_LIBDIR")
if netcdf_libdir:
netcdf_libdir = netcdf_libdir.strip()
if not netcdf_libdir:
raise ValueError("Environment variable NETCDF_LIBDIR is not defined")
# HDF5_LIBDIR is only given if the HDF5 and NetCDF libraries are to be statically linked
hdf5_libdir = os.getenv("HDF5_LIBDIR")
if hdf5_libdir:
hdf5_libdir = hdf5_libdir.strip()
# SZ_LIBDIR is the location of the SZ library to be linked in
sz_libdir = os.getenv("SZ_LIBDIR")
if sz_libdir:
sz_libdir = sz_libdir.strip()
# CAIRO_LIBDIR is only given if the cairo library is to be statically linked in
cairo_libdir = os.getenv("CAIRO_LIBDIR")
if cairo_libdir:
cairo_libdir = cairo_libdir.strip()
# PIXMAN_LIBDIR is only given if the pixman-1 library is to be statically linked in
pixman_libdir = os.getenv("PIXMAN_LIBDIR")
if pixman_libdir:
pixman_libdir = pixman_libdir.strip()
# PANGO_LIBDIR gives a non-standard location of the pango libraries
pango_libdir = os.getenv("PANGO_LIBDIR")
if pango_libdir:
pango_libdir = pango_libdir.strip()
# GFORTRAN_LIB gives a non-standard full-path location of the gfortran library to be used
# in the linking step. If not given or empty, the -lgfortran flag is used in the linking step.
gfortran_lib = os.getenv("GFORTRAN_LIB")
if gfortran_lib:
gfortran_lib = gfortran_lib.strip()
# The location of libpythonx.x.so, in case it is not in a standard location
python_libdir = os.path.split( distutils.sysconfig.get_python_lib(standard_lib=True) )[0]
# The list of additional directories to examine for libraries
libdir_list = [ "lib", netcdf_libdir, ]
if hdf5_libdir:
libdir_list.append(hdf5_libdir)
if sz_libdir:
libdir_list.append(sz_libdir)
if cairo_libdir:
libdir_list.append(cairo_libdir)
if pixman_libdir:
libdir_list.append(pixman_libdir)
if pango_libdir:
libdir_list.append(pango_libdir)
libdir_list.append(python_libdir)
# Get the list of ferret static libraries
# Stripping off the "lib" prefix and the ".a" suffix
fer_lib_list = [ ]
for libname in os.listdir("lib"):
if (libname[:3] == "lib") and (libname[-2:] == ".a"):
fer_lib_list.append(libname[3:-2])
# Create the list of libraries to link
lib_list = fer_lib_list[:]
if buildtype != "intel-mac":
# fer_lib_list is included multiple times to resolve interdependencies
|
# Linking in the rest of the system libraries were moved to addn_link_flags
# in order to make sure the appropriate netcdff, netcdf, hdf5_hl, hdf5, and
# cairo libraries are used.
addn_link_args = [ ]
# Link to the appropriate netcdf libraries.
# The hdf5 libraries are only used to resolve netcdf library function
# calls when statically linking in the netcdf libraries.
if hdf5_libdir:
netcdff_lib = os.path.join(netcdf_libdir, "libnetcdff.a")
addn_link_args.append(netcdff_lib)
netcdf_lib = os.path.join(netcdf_libdir, "libnetcdf.a")
addn_link_args.append(netcdf_lib)
hdf5_hl_lib = os.path.join(hdf5_libdir, "libhdf5_hl.a")
addn_link_args.append(hdf5_hl_lib)
hdf5_lib = os.path.join(hdf5_libdir, "libhdf5.a")
addn_link_args.append(hdf5_lib)
else:
addn_link_args.extend([ "-lnetcdff", "-lnetcdf" ])
# Link to the cairo library and the libraries it requires.
if cairo_libdir:
cairo_lib = os.path.join(cairo_libdir, "libcairo.a")
addn_link_args.append(cairo_lib);
if pixman_libdir:
pixman_lib = os.path.join(pixman_libdir, "libpixman-1.a")
else:
pixman_lib = "-lpixman-1"
addn_link_args.extend([ pixman_lib, "-lfreetype", "-lfontconfig", "-lpng" ])
else:
addn_link_args.append("-lcairo")
# The Pango-Cairo text-rendering libraries
addn_link_args.append("-lpangocairo-1.0")
# Link in the appropriate system libraries
if hdf5_libdir:
addn_link_args.append("-lcurl")
if sz_libdir:
addn_link_args.append("-lsz")
if gfortran_lib:
addn_link_args.append(gfortran_lib)
else:
addn_link_args.append("-lgfortran")
addn_link_args.extend([ "-lz", "-ldl", "-lm", "-fPIC", ])
if bind_and_hide_internal:
# Bind symbols and function symbols to any internal definitions
# and do not make any of the symbols or function symbols defined
# in any libraries externally visible (mainly for cairo and pixman).
# Those in the object files (including those from pyfermod and
# fer/ef_utility) will still be visible.
addn_link_args.extend([ "-Wl,-Bsymbolic", "-Wl,--exclude-libs,ALL"])
if os.uname()[0] == 'Darwin':
# For Mac OSX, leave room for library path renames
addn_link_args.append("-Wl,-headerpad_max_install_names")
# Get the list of C source files in pyfermod
src_list = [ ]
for srcname in os.listdir("pyfermod"):
if srcname[-2:] == ".c":
src_list.append(os.path.join("pyfermod", srcname))
# Get the list of additional objects to be linked in
# edited to remove reference to long-disused giu-fakes.o
addnobjs_list = [ ]
dirname = os.path.join("fer", "ef_utility")
for srcname in os.listdir(dirname):
if srcname[-2:] == ".o":
addnobjs_list.append(os.path.join(dirname, srcname))
dirname = os.path.join("fer", "special")
for srcname in ( "FerMem_routines.o", "fakes3.o", "ferret_dispatch.o", "linux_routines.o", ):
addnobjs_list.append(os.path.join(dirname, srcname))
for srcname in os.listdir(dirname):
if (srcname[0] == 'x') and (srcname[-7:] == "_data.o"):
addnobjs_list.append(os.path.join(dirname, srcname))
if bind_and_hide_internal:
# Duplicate objects in libraries to make them externally visible (e.g., for las
# external functions) if the '--exclude-libs ALL' flag was passed to the linker.
dirname = os.path.join("fmt", "src")
addnobjs_list.append(os.path.join(dirname, "tm_lenstr.o"));
addnobjs_list.append(os.path.join(dirname, "tm_fmt.o"));
addnobjs_list.append(os.path.join(dirname, "tm_lefint.o"));
# Create the pyferret.libpyferret Extension
ext_mods = [ Extension("pyferret.libpyferret", include_dirs = incdir_list,
sources = src_list,
extra_objects = addnobjs_list,
library_dirs = libdir_list,
libraries = lib_list,
extra_link_args = addn_link_args), ]
pyferret_version = None
xrev_name = os.path.join("fer", "dat", "xrevision_data.F")
xrev_file = open(xrev_name)
try:
pat = re.compile('\\s+DATA\\s+revision_level\\s*/\\s*(\\S+)\\s*/\\s*', flags=re.IGNORECASE)
for datlin in xrev_file:
mat = re.match(pat, datlin)
if mat:
pyferret_version = mat.group(1)
break
finally:
xrev_file.close()
if not pyferret_version:
raise ValueError('Unable to find the version number in ' + xrev_name)
# Configure the setup
setup(name = "pyferret",
version = pyferret_version,
description = "Python module providing Ferret functionality",
long_description = "Python module providing Ferret functionality",
author = "Karl M. Smith",
author_email = "karl.smith@noaa.gov",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "numpy", ],
packages = [ "pyferret", "pyferret.eofanal", "pyferret.fershp",
"pyferret.graphbind", "pyferret.regrid", "pyferret.stats", ],
package_dir = { "pyferret":"pyfermod", },
ext_modules = ext_mods)
setup(name = "pipedviewer",
version = pyferret_version,
description = "Graphics viewer controlled by a command pipe",
long_description = "A graphics viewer application that receives its " \
"drawing and other commands primarily from another " \
"application through a pipe. A limited number of " \
"commands are provided by the viewer itself to allow " \
"saving and some manipulation of the displayed scene. " \
"The controlling application, however, will be unaware " \
"of these modifications made to the scene.",
author = "Karl M. Smith",
author_email = "karl.smith@noaa.gov",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "multiprocessing", ],
packages = [ "pipedviewer", ],
package_dir = { "pipedviewer":"pviewmod", })
setup(name = "gcircle",
version = pyferret_version,
description = "Module of functions involving great circles with " \
"points given in longitudes and latitudes (thus " \
"assuming a spheroid model of the earth).",
long_description = "Module of functions involving great circles with " \
"points given in longitudes and latitudes (thus " \
"assuming a spheroid model of the earth).",
author = "Karl M. Smith",
author_email = "karl.smith@noaa.gov",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "numpy", ],
py_modules = [ "gcircle", ])
| lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list) | conditional_block |
viewer.ts | /******************************************************************************
*
* Copyright (c) 2018, the Perspective Authors.
*
* This file is part of the Perspective library, distributed under the terms
* of the Apache License 2.0. The full license can be found in the LICENSE
* file.
*
*/
import type * as perspective from "@finos/perspective";
import {
PerspectiveViewerElement,
register_plugin,
get_exprtk_commands,
} from "@finos/perspective-viewer/dist/pkg/perspective_viewer.js";
import {WASM_MODULE} from "./init";
export type PerspectiveViewerConfig = perspective.ViewConfig & {
plugin?: string;
settings?: boolean;
plugin_config?: any;
};
/**
* The Custom Elements implementation for `<perspective-viewer>`, as well at its
* API. `PerspectiveViewerElement` should not be constructed directly (like its
* parent class `HTMLElement`); instead, use `document.createElement()` or
* declare your `<perspective-viewer>` element in HTML. Once instantiated,
* `<perspective-viewer>` works just like a standard `HTMLElement`, with a few
* extra perspective-specific methods.
*
* @example
* ```javascript
* const viewer = document.createElement("perspective-viewer");
* ```
* @example
* ```javascript
* document.body.innerHTML = `
* <perspective-viewer id="viewer"></perspective-viewer>
* `;
* const viewer = document.body.querySelector("#viewer");
* ```
* @noInheritDoc
*/
export class HTMLPerspectiveViewerElement extends HTMLElement {
private instance: PerspectiveViewerElement;
/**
* Should not be called directly (will throw `TypeError: Illegal
* constructor`).
*
* @ignore
*/
constructor() {
super();
this.load_wasm();
}
private async load_wasm(): Promise<void> {
await WASM_MODULE;
if (!this.instance) {
this.instance = new PerspectiveViewerElement(this);
}
}
/**
* Part of the Custom Elements API. This method is called by the browser,
* and should not be called directly by applications.
*
* @ignore
*/
async connectedCallback(): Promise<void> {
await this.load_wasm();
this.instance.connected_callback();
}
/**
* Register a new plugin via its custom element name. This method is called
* automatically as a side effect of importing a plugin module, so this
* method should only typically be called by plugin authors.
*
* @category Plugin
* @param name The `name` of the custom element to register, as supplied
* to the `customElements.define(name)` method.
* @example
* ```javascript
* customElements.get("perspective-viewer").registerPlugin("my-plugin");
* ```
*/
static async registerPlugin(name: string): Promise<void> {
await WASM_MODULE;
register_plugin(name);
}
/**
* Load a `perspective.Table`. If `load` or `update` have already been
* called on this element, its internal `perspective.Table` will _not_ be
* deleted, but it will bed de-referenced by this `<perspective-viewer>`.
*
* @category Data
* @param data A `Promise` which resolves to the `perspective.Table`
* @returns {Promise<void>} A promise which resolves once the data is
* loaded, a `perspective.View` has been created, and the active plugin has
* rendered.
* @example <caption>Load perspective.table</caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
* @example <caption>Load Promise<perspective.table></caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
*/
async load(
table: Promise<perspective.Table> | perspective.Table
): Promise<void> {
await this.load_wasm();
await this.instance.js_load(Promise.resolve(table));
}
/**
* Redraw this `<perspective-viewer>` and plugin when its dimensions or
* visibility has been updated. By default, `<perspective-viewer>` will
* auto-size when its own dimensions change, so this method need not be
* called; when disabled via `setAutoSize(false)` however, this method
* _must_ be called, and will not respond to dimension or style changes to
* its parent container otherwise. `notifyResize()` does not recalculate
* the current `View`, but all plugins will re-request the data window
* (which itself may be smaller or larger due to resize).
*
* @category Util
* @param force Whether to re-render, even if the dimenions have not
* changed. When set to `false` and auto-size is enabled (the defaults),
* calling this method will automatically disable auto-size.
* @returns A `Promise<void>` which resolves when this resize event has
* finished rendering.
* @example <caption>Bind `notfyResize()` to browser dimensions</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.setAutoSize(false);
* window.addEventListener("resize", () => viewer.notifyResize());
* ```
*/
async notifyResize(force = false): Promise<void> {
await this.load_wasm();
await this.instance.js_resize(force);
}
/**
* Determines the auto-size behavior. When `true` (the default), this
* element will re-render itself whenever its own dimensions change,
* utilizing a `ResizeObserver`; when `false`, you must explicitly call
* `notifyResize()` when the element's dimensions have changed.
*
* @category Util
* @param autosize Whether to re-render when this element's dimensions
* change.
* @example <caption>Disable auto-size</caption>
* ```javascript
* await viewer.setAutoSize(false);
* ```
*/
async setAutoSize(autosize = true): Promise<void> {
await this.load_wasm();
await this.instance.js_set_auto_size(autosize);
}
/**
* Returns the `perspective.Table()` which was supplied to `load()`
*
* @category Data
* @param wait_for_table Whether to await `load()` if it has not yet been
* invoked, or fail immediately.
* @returns A `Promise` which resolves to a `perspective.Table`
* @example <caption>Share a `Table`</caption>
* ```javascript
* const viewers = document.querySelectorAll("perspective-viewer");
* const [viewer1, viewer2] = Array.from(viewers);
* const table = await viewer1.getTable();
* await viewer2.load(table);
* ```
*/
async getTable(wait_for_table?: boolean): Promise<perspective.Table> {
await this.load_wasm();
const table = await this.instance.js_get_table(!!wait_for_table);
return table;
}
/**
* Returns the underlying `perspective.View` currently configured for this
* `<perspective-viewer>`. Because ownership of the `perspective.View` is
* mainainted by the `<perspective-viewer>` it was created by, this `View`
* may become deleted (invalidated by calling `delete()`) at any time -
* specifically, it will be deleted whenever the `PerspectiveViewConfig`
* changes. Because of this, when using this API, prefer calling
* `getView()` repeatedly over caching the returned `perspective.View`,
* especially in `async` contexts.
*
* @category Data
* @returns A `Promise` which ressolves to a `perspective.View`.
* @example <caption>Collapse grid to root</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const view = await viewer.getView();
* await view.set_depth(0);
* ```
*/
async getView(): Promise<perspective.View> {
await this.load_wasm();
const view = await this.instance.js_get_view();
return view;
}
/**
* Restore this element to a state as generated by a reciprocal call to
* `save`. In `json` (default) format, `PerspectiveViewerConfig`'s fields
* have specific semantics:
*
* - When a key is missing, this field is ignored; `<perspective-viewer>`
* will maintain whatever settings for this field is currently applied.
* - When the key is supplied, but the value is `undefined`, the field is
* reset to its default value for this current `View`, i.e. the state it
* would be in after `load()` resolves.
* - When the key is defined to a value, the value is applied for this
* field.
*
* This behavior is convenient for explicitly controlling current vs desired
* UI state in a single request, but it does make it a bit inconvenient to
* use `restore()` to reset a `<perspective-viewer>` to default as you must
* do so explicitly for each key; for this case, use `reset()` instead of
* restore.
*
* As noted in `save()`, this configuration state does not include the
* `Table` or its `Schema`. In order for `restore()` to work correctly, it
* must be called on a `<perspective-viewer>` that has a `Table already
* `load()`-ed, with the same (or a type-compatible superset) `Schema`.
* It does not need have the same rows, or even be populated.
*
* @category Persistence
* @param config returned by `save()`. This can be any format returned by
* `save()`; the specific deserialization is chosen by `typeof config`.
* @returns A promise which resolves when the changes have been applied and
* rendered.
* @example <caption>Restore a viewer from `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = localStorage.getItem("viewer_state");
* await viewer.restore(token);
* ```
*/
async restore(
config: PerspectiveViewerConfig | string | ArrayBuffer
): Promise<void> {
await this.load_wasm();
await this.instance.js_restore(config);
}
/**
* Serialize this element's attribute/interaction state, but _not_ the
* `perspective.Table` or its `Schema`. `save()` is designed to be used in
* conjunction with `restore()` to persist user settings and bookmarks, but
* the `PerspectiveViewerConfig` object returned in `json` format can also
* be written by hand quite easily, which is useful for authoring
* pre-conceived configs.
*
* @category Persistence
* @param format The serialization format - `json` (JavaScript object),
* `arraybuffer` or `string`. `restore()` uses the returned config's type
* to infer format.
* @returns a serialized element in the chosen format.
* @example <caption>Save a viewer to `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = await viewer.save("string");
* localStorage.setItem("viewer_state", token);
* ```
*/
async save(): Promise<PerspectiveViewerConfig>;
async save(format: "json"): Promise<PerspectiveViewerConfig>;
async save(format: "arraybuffer"): Promise<ArrayBuffer>;
async save(format: "string"): Promise<string>;
async save(
format?: "json" | "arraybuffer" | "string"
): Promise<PerspectiveViewerConfig | string | ArrayBuffer> {
await this.load_wasm();
const config = await this.instance.js_save(format);
return config;
}
/**
* Flush any pending modifications to this `<perspective-viewer>`. Since
* `<perspective-viewer>`'s API is almost entirely `async`, it may take
* some milliseconds before any method call such as `restore()` affects
* the rendered element. If you want to make sure any invoked method which
* affects the rendered has had its results rendered, call and await
* `flush()`
*
* @category Util
* @returns {Promise<void>} A promise which resolves when the current
* pending state changes have been applied and rendered.
* @example <caption>Flush an unawaited `restore()`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.restore({group_by: ["State"]});
* await viewer.flush();
* console.log("Viewer has been rendered with a pivot!");
* ```
*/
async flush(): Promise<void> {
await this.load_wasm();
await this.instance.js_flush();
}
/**
* Reset's this element's view state and attributes to default. Does not
* delete this element's `perspective.table` or otherwise modify the data
* state.
*
* @category Persistence
* @param all Should `expressions` param be reset as well, defaults to
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.reset();
* ```
*/
async reset(all = false): Promise<void> {
await this.load_wasm();
await this.instance.js_reset(all);
}
/**
* Deletes this element and clears it's internal state (but not its
* user state). This (or the underlying `perspective.view`'s equivalent
* method) must be called in order for its memory to be reclaimed, as well
* as the reciprocal method on the `perspective.table` which this viewer is
* bound to.
*
* @category Util
*/
async delete(): Promise<void> {
await this.load_wasm();
await this.instance.js_delete();
}
/**
* Download this element's data as a CSV file.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
*/
async download(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_download(flat);
}
/**
* Copies this element's view data (as a CSV) to the clipboard. This method
* must be called from an event handler, subject to the browser's
* restrictions on clipboard access. See
* {@link https://www.w3.org/TR/clipboard-apis/#allow-read-clipboard}.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const button = document.querySelector("button");
* button.addEventListener("click", async () => {
* await viewer.copy();
* });
* ```
*/
async copy(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_copy(flat);
}
/**
* Restyles the elements and to pick up any style changes. While most of
* perspective styling is plain CSS and can be updated at any time, some
* CSS rules are read and cached, e.g. the series colors for
* `@finos/perspective-viewer-d3fc` which are read from CSS then reapplied
* as SVG and Canvas attributes.
*
* @category Util
*/
async restyleElement(): Promise<void> {
await this.load_wasm();
await this.instance.js_restyle_element();
}
/**
* Sets the theme names available via the `<perspective-viewer>` status bar
* UI. Typically these will be auto-detected simply by including the
* theme `.css` in a `<link>` tag; however, auto-detection can fail if
* the `<link>` tag is not a same-origin request due to CORS. For servers
* configured to allow cross-origin requests, you can use the
* [`crossorigin` attribute](https://html.spec.whatwg.org/multipage/semantics.html#attr-link-crossorigin)
* to enable detection, e.g. `<link crossorigin="anonymous" .. >`. If for
* whatever reason auto-detection still fails, you may set the themes via
* this method. Note the theme `.css` must still be loaded in this case -
* the `resetThemes()` method only lets the `<perspective-viewer>` know what
* theme names are available.
*
* @category Util
* @param themes A list of theme names to use, or auto-detect from
* document's stylesheets if `undefined`.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.resetThemes(["Material Light", "Material Dark"]);
* ```
*/
async | (themes?: Array<string>): Promise<void> {
await this.load_wasm();
await this.instance.js_reset_themes(themes);
}
/**
* Gets the edit port, the port number for which `Table` updates from this
* `<perspective-viewer>` are generated. This port number will be present
* in the options object for a `View.on_update()` callback for any update
* which was originated by the `<perspective-viewer>`/user, which can be
* used to distinguish server-originated updates from user edits.
*
* @category Util
* @returns A promise which resolves to the current edit port.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const editport = await viewer.getEditPort();
* const table = await viewer.getTable();
* const view = await table.view();
* view.on_update(obj => {
* if (obj.port_id = editport) {
* console.log("User edit detected");
* }
* });
* ```
*/
async getEditPort(): Promise<number> {
await this.load_wasm();
const port = await this.instance.js_get_edit_port();
return port;
}
/**
* Determines the render throttling behavior. Can be an integer, for
* millisecond window to throttle render event; or, if `undefined`,
* will try to determine the optimal throttle time from this component's
* render framerate.
*
* @category Util
* @param value an optional throttle rate in milliseconds (integer). If not
* supplied, adaptive throttling is calculated from the average plugin
* render time.
* @example <caption>Limit FPS to 1 frame per second</caption>
* ```javascript
* await viewer.setThrottle(1000);
* ```
*/
async setThrottle(value?: number): Promise<void> {
await this.load_wasm();
await this.instance.js_set_throttle(value);
}
/**
* Opens/closes the element's config menu, equivalent to clicking the
* settings button in the UI. This method is equivalent to
* `viewer.restore({settings: force})` when `force` is present, but
* `restore()` cannot toggle as `toggleConfig()` can, you would need to
* first read the settings state from `save()` otherwise.
*
* Calling `toggleConfig()` may be delayed if an async render is currently
* in process, and it may only partially render the UI if `load()` has not
* yet resolved.
*
* @category UI Action
* @param force If supplied, explicitly set the config state to "open"
* (`true`) or "closed" (`false`).
* @example
* ```javascript
* await viewer.toggleConfig();
* ```
*/
async toggleConfig(force?: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_toggle_config(force);
}
/**
* Get the currently active plugin custom element instance, or a specific
* named instance if requested. `getPlugin(name)` does not activate the
* plugin requested, so if this plugin is not active the returned
* `HTMLElement` will not have a `parentElement`.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getPlugin()` will cause `perspective-viewer-plugin` to be registered as
* a side effect.
*
* @category Plugin
* @param name Optionally a specific plugin name, defaulting to the current
* active plugin.
* @returns The active or requested plugin instance.
*/
async getPlugin(name?: string): Promise<HTMLElement> {
await this.load_wasm();
const plugin = await this.instance.js_get_plugin(name);
return plugin;
}
/**
* Get all plugin custom element instances, in order of registration.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getAllPlugins()` will cause `perspective-viewer-plugin` to be registered
* as a side effect.
*
* @category Plugin
* @returns An `Array` of the plugin instances for this
* `<perspective-viewer>`.
*/
async getAllPlugins(): Promise<Array<HTMLElement>> {
await this.load_wasm();
const plugins = await this.instance.js_get_all_plugins();
return plugins;
}
/**
* Get the raw pointer to this `<perspective-viewer>` WASM model, such that
* it may be passed back to WASM function calls that take a
* `PerspectiveViewerElement` as an argument.
*
* @category Internal
* @returns A pointer to this model
*/
async unsafe_get_model(): Promise<number> {
await this.load_wasm();
return await this.instance.js_unsafe_get_model();
}
/**
* Get metadata for ExprTK's supported commands.
*
* @category Internal
* @returns An array of JSON descriptors for ExprTK commands
*/
static async getExprtkCommands(): Promise<Array<Record<string, string>>> {
await WASM_MODULE;
return get_exprtk_commands();
}
}
if (document.createElement("perspective-viewer").constructor === HTMLElement) {
window.customElements.define(
"perspective-viewer",
HTMLPerspectiveViewerElement
);
}
| resetThemes | identifier_name |
viewer.ts | /******************************************************************************
*
* Copyright (c) 2018, the Perspective Authors.
*
* This file is part of the Perspective library, distributed under the terms
* of the Apache License 2.0. The full license can be found in the LICENSE
* file.
*
*/
import type * as perspective from "@finos/perspective";
import {
PerspectiveViewerElement,
register_plugin,
get_exprtk_commands,
} from "@finos/perspective-viewer/dist/pkg/perspective_viewer.js";
import {WASM_MODULE} from "./init";
export type PerspectiveViewerConfig = perspective.ViewConfig & {
plugin?: string;
settings?: boolean;
plugin_config?: any;
};
/**
* The Custom Elements implementation for `<perspective-viewer>`, as well at its
* API. `PerspectiveViewerElement` should not be constructed directly (like its
* parent class `HTMLElement`); instead, use `document.createElement()` or
* declare your `<perspective-viewer>` element in HTML. Once instantiated,
* `<perspective-viewer>` works just like a standard `HTMLElement`, with a few
* extra perspective-specific methods.
*
* @example
* ```javascript
* const viewer = document.createElement("perspective-viewer");
* ```
* @example
* ```javascript
* document.body.innerHTML = `
* <perspective-viewer id="viewer"></perspective-viewer>
* `;
* const viewer = document.body.querySelector("#viewer");
* ```
* @noInheritDoc
*/
export class HTMLPerspectiveViewerElement extends HTMLElement {
private instance: PerspectiveViewerElement;
/**
* Should not be called directly (will throw `TypeError: Illegal
* constructor`).
*
* @ignore
*/
constructor() {
super();
this.load_wasm();
}
private async load_wasm(): Promise<void> {
await WASM_MODULE;
if (!this.instance) {
this.instance = new PerspectiveViewerElement(this);
}
}
/**
* Part of the Custom Elements API. This method is called by the browser,
* and should not be called directly by applications.
*
* @ignore
*/
async connectedCallback(): Promise<void> {
await this.load_wasm();
this.instance.connected_callback();
}
/**
* Register a new plugin via its custom element name. This method is called
* automatically as a side effect of importing a plugin module, so this
* method should only typically be called by plugin authors.
*
* @category Plugin
* @param name The `name` of the custom element to register, as supplied
* to the `customElements.define(name)` method.
* @example
* ```javascript
* customElements.get("perspective-viewer").registerPlugin("my-plugin");
* ```
*/
static async registerPlugin(name: string): Promise<void> {
await WASM_MODULE;
register_plugin(name);
}
/**
* Load a `perspective.Table`. If `load` or `update` have already been
* called on this element, its internal `perspective.Table` will _not_ be
* deleted, but it will bed de-referenced by this `<perspective-viewer>`.
*
* @category Data
* @param data A `Promise` which resolves to the `perspective.Table`
* @returns {Promise<void>} A promise which resolves once the data is
* loaded, a `perspective.View` has been created, and the active plugin has
* rendered.
* @example <caption>Load perspective.table</caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
* @example <caption>Load Promise<perspective.table></caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
*/
async load(
table: Promise<perspective.Table> | perspective.Table
): Promise<void> {
await this.load_wasm();
await this.instance.js_load(Promise.resolve(table));
}
/**
* Redraw this `<perspective-viewer>` and plugin when its dimensions or
* visibility has been updated. By default, `<perspective-viewer>` will
* auto-size when its own dimensions change, so this method need not be
* called; when disabled via `setAutoSize(false)` however, this method
* _must_ be called, and will not respond to dimension or style changes to
* its parent container otherwise. `notifyResize()` does not recalculate
* the current `View`, but all plugins will re-request the data window
* (which itself may be smaller or larger due to resize).
*
* @category Util
* @param force Whether to re-render, even if the dimenions have not
* changed. When set to `false` and auto-size is enabled (the defaults),
* calling this method will automatically disable auto-size.
* @returns A `Promise<void>` which resolves when this resize event has
* finished rendering.
* @example <caption>Bind `notfyResize()` to browser dimensions</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.setAutoSize(false);
* window.addEventListener("resize", () => viewer.notifyResize());
* ```
*/
async notifyResize(force = false): Promise<void> {
await this.load_wasm();
await this.instance.js_resize(force);
}
/**
* Determines the auto-size behavior. When `true` (the default), this
* element will re-render itself whenever its own dimensions change,
* utilizing a `ResizeObserver`; when `false`, you must explicitly call
* `notifyResize()` when the element's dimensions have changed.
*
* @category Util
* @param autosize Whether to re-render when this element's dimensions
* change.
* @example <caption>Disable auto-size</caption>
* ```javascript
* await viewer.setAutoSize(false);
* ```
*/
async setAutoSize(autosize = true): Promise<void> |
/**
* Returns the `perspective.Table()` which was supplied to `load()`
*
* @category Data
* @param wait_for_table Whether to await `load()` if it has not yet been
* invoked, or fail immediately.
* @returns A `Promise` which resolves to a `perspective.Table`
* @example <caption>Share a `Table`</caption>
* ```javascript
* const viewers = document.querySelectorAll("perspective-viewer");
* const [viewer1, viewer2] = Array.from(viewers);
* const table = await viewer1.getTable();
* await viewer2.load(table);
* ```
*/
async getTable(wait_for_table?: boolean): Promise<perspective.Table> {
await this.load_wasm();
const table = await this.instance.js_get_table(!!wait_for_table);
return table;
}
/**
* Returns the underlying `perspective.View` currently configured for this
* `<perspective-viewer>`. Because ownership of the `perspective.View` is
* mainainted by the `<perspective-viewer>` it was created by, this `View`
* may become deleted (invalidated by calling `delete()`) at any time -
* specifically, it will be deleted whenever the `PerspectiveViewConfig`
* changes. Because of this, when using this API, prefer calling
* `getView()` repeatedly over caching the returned `perspective.View`,
* especially in `async` contexts.
*
* @category Data
* @returns A `Promise` which ressolves to a `perspective.View`.
* @example <caption>Collapse grid to root</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const view = await viewer.getView();
* await view.set_depth(0);
* ```
*/
async getView(): Promise<perspective.View> {
await this.load_wasm();
const view = await this.instance.js_get_view();
return view;
}
/**
* Restore this element to a state as generated by a reciprocal call to
* `save`. In `json` (default) format, `PerspectiveViewerConfig`'s fields
* have specific semantics:
*
* - When a key is missing, this field is ignored; `<perspective-viewer>`
* will maintain whatever settings for this field is currently applied.
* - When the key is supplied, but the value is `undefined`, the field is
* reset to its default value for this current `View`, i.e. the state it
* would be in after `load()` resolves.
* - When the key is defined to a value, the value is applied for this
* field.
*
* This behavior is convenient for explicitly controlling current vs desired
* UI state in a single request, but it does make it a bit inconvenient to
* use `restore()` to reset a `<perspective-viewer>` to default as you must
* do so explicitly for each key; for this case, use `reset()` instead of
* restore.
*
* As noted in `save()`, this configuration state does not include the
* `Table` or its `Schema`. In order for `restore()` to work correctly, it
* must be called on a `<perspective-viewer>` that has a `Table already
* `load()`-ed, with the same (or a type-compatible superset) `Schema`.
* It does not need have the same rows, or even be populated.
*
* @category Persistence
* @param config returned by `save()`. This can be any format returned by
* `save()`; the specific deserialization is chosen by `typeof config`.
* @returns A promise which resolves when the changes have been applied and
* rendered.
* @example <caption>Restore a viewer from `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = localStorage.getItem("viewer_state");
* await viewer.restore(token);
* ```
*/
async restore(
config: PerspectiveViewerConfig | string | ArrayBuffer
): Promise<void> {
await this.load_wasm();
await this.instance.js_restore(config);
}
/**
* Serialize this element's attribute/interaction state, but _not_ the
* `perspective.Table` or its `Schema`. `save()` is designed to be used in
* conjunction with `restore()` to persist user settings and bookmarks, but
* the `PerspectiveViewerConfig` object returned in `json` format can also
* be written by hand quite easily, which is useful for authoring
* pre-conceived configs.
*
* @category Persistence
* @param format The serialization format - `json` (JavaScript object),
* `arraybuffer` or `string`. `restore()` uses the returned config's type
* to infer format.
* @returns a serialized element in the chosen format.
* @example <caption>Save a viewer to `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = await viewer.save("string");
* localStorage.setItem("viewer_state", token);
* ```
*/
async save(): Promise<PerspectiveViewerConfig>;
async save(format: "json"): Promise<PerspectiveViewerConfig>;
async save(format: "arraybuffer"): Promise<ArrayBuffer>;
async save(format: "string"): Promise<string>;
async save(
format?: "json" | "arraybuffer" | "string"
): Promise<PerspectiveViewerConfig | string | ArrayBuffer> {
await this.load_wasm();
const config = await this.instance.js_save(format);
return config;
}
/**
* Flush any pending modifications to this `<perspective-viewer>`. Since
* `<perspective-viewer>`'s API is almost entirely `async`, it may take
* some milliseconds before any method call such as `restore()` affects
* the rendered element. If you want to make sure any invoked method which
* affects the rendered has had its results rendered, call and await
* `flush()`
*
* @category Util
* @returns {Promise<void>} A promise which resolves when the current
* pending state changes have been applied and rendered.
* @example <caption>Flush an unawaited `restore()`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.restore({group_by: ["State"]});
* await viewer.flush();
* console.log("Viewer has been rendered with a pivot!");
* ```
*/
async flush(): Promise<void> {
await this.load_wasm();
await this.instance.js_flush();
}
/**
* Reset's this element's view state and attributes to default. Does not
* delete this element's `perspective.table` or otherwise modify the data
* state.
*
* @category Persistence
* @param all Should `expressions` param be reset as well, defaults to
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.reset();
* ```
*/
async reset(all = false): Promise<void> {
await this.load_wasm();
await this.instance.js_reset(all);
}
/**
* Deletes this element and clears it's internal state (but not its
* user state). This (or the underlying `perspective.view`'s equivalent
* method) must be called in order for its memory to be reclaimed, as well
* as the reciprocal method on the `perspective.table` which this viewer is
* bound to.
*
* @category Util
*/
async delete(): Promise<void> {
await this.load_wasm();
await this.instance.js_delete();
}
/**
* Download this element's data as a CSV file.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
*/
async download(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_download(flat);
}
/**
* Copies this element's view data (as a CSV) to the clipboard. This method
* must be called from an event handler, subject to the browser's
* restrictions on clipboard access. See
* {@link https://www.w3.org/TR/clipboard-apis/#allow-read-clipboard}.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const button = document.querySelector("button");
* button.addEventListener("click", async () => {
* await viewer.copy();
* });
* ```
*/
async copy(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_copy(flat);
}
/**
* Restyles the elements and to pick up any style changes. While most of
* perspective styling is plain CSS and can be updated at any time, some
* CSS rules are read and cached, e.g. the series colors for
* `@finos/perspective-viewer-d3fc` which are read from CSS then reapplied
* as SVG and Canvas attributes.
*
* @category Util
*/
async restyleElement(): Promise<void> {
await this.load_wasm();
await this.instance.js_restyle_element();
}
/**
* Sets the theme names available via the `<perspective-viewer>` status bar
* UI. Typically these will be auto-detected simply by including the
* theme `.css` in a `<link>` tag; however, auto-detection can fail if
* the `<link>` tag is not a same-origin request due to CORS. For servers
* configured to allow cross-origin requests, you can use the
* [`crossorigin` attribute](https://html.spec.whatwg.org/multipage/semantics.html#attr-link-crossorigin)
* to enable detection, e.g. `<link crossorigin="anonymous" .. >`. If for
* whatever reason auto-detection still fails, you may set the themes via
* this method. Note the theme `.css` must still be loaded in this case -
* the `resetThemes()` method only lets the `<perspective-viewer>` know what
* theme names are available.
*
* @category Util
* @param themes A list of theme names to use, or auto-detect from
* document's stylesheets if `undefined`.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.resetThemes(["Material Light", "Material Dark"]);
* ```
*/
async resetThemes(themes?: Array<string>): Promise<void> {
await this.load_wasm();
await this.instance.js_reset_themes(themes);
}
/**
* Gets the edit port, the port number for which `Table` updates from this
* `<perspective-viewer>` are generated. This port number will be present
* in the options object for a `View.on_update()` callback for any update
* which was originated by the `<perspective-viewer>`/user, which can be
* used to distinguish server-originated updates from user edits.
*
* @category Util
* @returns A promise which resolves to the current edit port.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const editport = await viewer.getEditPort();
* const table = await viewer.getTable();
* const view = await table.view();
* view.on_update(obj => {
* if (obj.port_id = editport) {
* console.log("User edit detected");
* }
* });
* ```
*/
async getEditPort(): Promise<number> {
await this.load_wasm();
const port = await this.instance.js_get_edit_port();
return port;
}
/**
* Determines the render throttling behavior. Can be an integer, for
* millisecond window to throttle render event; or, if `undefined`,
* will try to determine the optimal throttle time from this component's
* render framerate.
*
* @category Util
* @param value an optional throttle rate in milliseconds (integer). If not
* supplied, adaptive throttling is calculated from the average plugin
* render time.
* @example <caption>Limit FPS to 1 frame per second</caption>
* ```javascript
* await viewer.setThrottle(1000);
* ```
*/
async setThrottle(value?: number): Promise<void> {
await this.load_wasm();
await this.instance.js_set_throttle(value);
}
/**
* Opens/closes the element's config menu, equivalent to clicking the
* settings button in the UI. This method is equivalent to
* `viewer.restore({settings: force})` when `force` is present, but
* `restore()` cannot toggle as `toggleConfig()` can, you would need to
* first read the settings state from `save()` otherwise.
*
* Calling `toggleConfig()` may be delayed if an async render is currently
* in process, and it may only partially render the UI if `load()` has not
* yet resolved.
*
* @category UI Action
* @param force If supplied, explicitly set the config state to "open"
* (`true`) or "closed" (`false`).
* @example
* ```javascript
* await viewer.toggleConfig();
* ```
*/
async toggleConfig(force?: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_toggle_config(force);
}
/**
* Get the currently active plugin custom element instance, or a specific
* named instance if requested. `getPlugin(name)` does not activate the
* plugin requested, so if this plugin is not active the returned
* `HTMLElement` will not have a `parentElement`.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getPlugin()` will cause `perspective-viewer-plugin` to be registered as
* a side effect.
*
* @category Plugin
* @param name Optionally a specific plugin name, defaulting to the current
* active plugin.
* @returns The active or requested plugin instance.
*/
async getPlugin(name?: string): Promise<HTMLElement> {
await this.load_wasm();
const plugin = await this.instance.js_get_plugin(name);
return plugin;
}
/**
* Get all plugin custom element instances, in order of registration.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getAllPlugins()` will cause `perspective-viewer-plugin` to be registered
* as a side effect.
*
* @category Plugin
* @returns An `Array` of the plugin instances for this
* `<perspective-viewer>`.
*/
async getAllPlugins(): Promise<Array<HTMLElement>> {
await this.load_wasm();
const plugins = await this.instance.js_get_all_plugins();
return plugins;
}
/**
* Get the raw pointer to this `<perspective-viewer>` WASM model, such that
* it may be passed back to WASM function calls that take a
* `PerspectiveViewerElement` as an argument.
*
* @category Internal
* @returns A pointer to this model
*/
async unsafe_get_model(): Promise<number> {
await this.load_wasm();
return await this.instance.js_unsafe_get_model();
}
/**
* Get metadata for ExprTK's supported commands.
*
* @category Internal
* @returns An array of JSON descriptors for ExprTK commands
*/
static async getExprtkCommands(): Promise<Array<Record<string, string>>> {
await WASM_MODULE;
return get_exprtk_commands();
}
}
if (document.createElement("perspective-viewer").constructor === HTMLElement) {
window.customElements.define(
"perspective-viewer",
HTMLPerspectiveViewerElement
);
}
| {
await this.load_wasm();
await this.instance.js_set_auto_size(autosize);
} | identifier_body |
viewer.ts | /******************************************************************************
*
* Copyright (c) 2018, the Perspective Authors.
*
* This file is part of the Perspective library, distributed under the terms
* of the Apache License 2.0. The full license can be found in the LICENSE
* file.
*
*/
import type * as perspective from "@finos/perspective";
import {
PerspectiveViewerElement,
register_plugin,
get_exprtk_commands,
} from "@finos/perspective-viewer/dist/pkg/perspective_viewer.js";
import {WASM_MODULE} from "./init";
export type PerspectiveViewerConfig = perspective.ViewConfig & {
plugin?: string;
settings?: boolean;
plugin_config?: any;
};
/**
* The Custom Elements implementation for `<perspective-viewer>`, as well at its
* API. `PerspectiveViewerElement` should not be constructed directly (like its
* parent class `HTMLElement`); instead, use `document.createElement()` or
* declare your `<perspective-viewer>` element in HTML. Once instantiated,
* `<perspective-viewer>` works just like a standard `HTMLElement`, with a few
* extra perspective-specific methods.
*
* @example
* ```javascript
* const viewer = document.createElement("perspective-viewer");
* ```
* @example
* ```javascript
* document.body.innerHTML = `
* <perspective-viewer id="viewer"></perspective-viewer>
* `;
* const viewer = document.body.querySelector("#viewer");
* ```
* @noInheritDoc
*/
export class HTMLPerspectiveViewerElement extends HTMLElement {
private instance: PerspectiveViewerElement;
/**
* Should not be called directly (will throw `TypeError: Illegal
* constructor`).
*
* @ignore
*/
constructor() {
super();
this.load_wasm();
}
private async load_wasm(): Promise<void> {
await WASM_MODULE;
if (!this.instance) |
}
/**
* Part of the Custom Elements API. This method is called by the browser,
* and should not be called directly by applications.
*
* @ignore
*/
async connectedCallback(): Promise<void> {
await this.load_wasm();
this.instance.connected_callback();
}
/**
* Register a new plugin via its custom element name. This method is called
* automatically as a side effect of importing a plugin module, so this
* method should only typically be called by plugin authors.
*
* @category Plugin
* @param name The `name` of the custom element to register, as supplied
* to the `customElements.define(name)` method.
* @example
* ```javascript
* customElements.get("perspective-viewer").registerPlugin("my-plugin");
* ```
*/
static async registerPlugin(name: string): Promise<void> {
await WASM_MODULE;
register_plugin(name);
}
/**
* Load a `perspective.Table`. If `load` or `update` have already been
* called on this element, its internal `perspective.Table` will _not_ be
* deleted, but it will bed de-referenced by this `<perspective-viewer>`.
*
* @category Data
* @param data A `Promise` which resolves to the `perspective.Table`
* @returns {Promise<void>} A promise which resolves once the data is
* loaded, a `perspective.View` has been created, and the active plugin has
* rendered.
* @example <caption>Load perspective.table</caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
* @example <caption>Load Promise<perspective.table></caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
*/
async load(
table: Promise<perspective.Table> | perspective.Table
): Promise<void> {
await this.load_wasm();
await this.instance.js_load(Promise.resolve(table));
}
/**
* Redraw this `<perspective-viewer>` and plugin when its dimensions or
* visibility has been updated. By default, `<perspective-viewer>` will
* auto-size when its own dimensions change, so this method need not be
* called; when disabled via `setAutoSize(false)` however, this method
* _must_ be called, and will not respond to dimension or style changes to
* its parent container otherwise. `notifyResize()` does not recalculate
* the current `View`, but all plugins will re-request the data window
* (which itself may be smaller or larger due to resize).
*
* @category Util
* @param force Whether to re-render, even if the dimenions have not
* changed. When set to `false` and auto-size is enabled (the defaults),
* calling this method will automatically disable auto-size.
* @returns A `Promise<void>` which resolves when this resize event has
* finished rendering.
* @example <caption>Bind `notfyResize()` to browser dimensions</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.setAutoSize(false);
* window.addEventListener("resize", () => viewer.notifyResize());
* ```
*/
async notifyResize(force = false): Promise<void> {
await this.load_wasm();
await this.instance.js_resize(force);
}
/**
* Determines the auto-size behavior. When `true` (the default), this
* element will re-render itself whenever its own dimensions change,
* utilizing a `ResizeObserver`; when `false`, you must explicitly call
* `notifyResize()` when the element's dimensions have changed.
*
* @category Util
* @param autosize Whether to re-render when this element's dimensions
* change.
* @example <caption>Disable auto-size</caption>
* ```javascript
* await viewer.setAutoSize(false);
* ```
*/
async setAutoSize(autosize = true): Promise<void> {
await this.load_wasm();
await this.instance.js_set_auto_size(autosize);
}
/**
* Returns the `perspective.Table()` which was supplied to `load()`
*
* @category Data
* @param wait_for_table Whether to await `load()` if it has not yet been
* invoked, or fail immediately.
* @returns A `Promise` which resolves to a `perspective.Table`
* @example <caption>Share a `Table`</caption>
* ```javascript
* const viewers = document.querySelectorAll("perspective-viewer");
* const [viewer1, viewer2] = Array.from(viewers);
* const table = await viewer1.getTable();
* await viewer2.load(table);
* ```
*/
async getTable(wait_for_table?: boolean): Promise<perspective.Table> {
await this.load_wasm();
const table = await this.instance.js_get_table(!!wait_for_table);
return table;
}
/**
* Returns the underlying `perspective.View` currently configured for this
* `<perspective-viewer>`. Because ownership of the `perspective.View` is
* mainainted by the `<perspective-viewer>` it was created by, this `View`
* may become deleted (invalidated by calling `delete()`) at any time -
* specifically, it will be deleted whenever the `PerspectiveViewConfig`
* changes. Because of this, when using this API, prefer calling
* `getView()` repeatedly over caching the returned `perspective.View`,
* especially in `async` contexts.
*
* @category Data
* @returns A `Promise` which ressolves to a `perspective.View`.
* @example <caption>Collapse grid to root</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const view = await viewer.getView();
* await view.set_depth(0);
* ```
*/
async getView(): Promise<perspective.View> {
await this.load_wasm();
const view = await this.instance.js_get_view();
return view;
}
/**
* Restore this element to a state as generated by a reciprocal call to
* `save`. In `json` (default) format, `PerspectiveViewerConfig`'s fields
* have specific semantics:
*
* - When a key is missing, this field is ignored; `<perspective-viewer>`
* will maintain whatever settings for this field is currently applied.
* - When the key is supplied, but the value is `undefined`, the field is
* reset to its default value for this current `View`, i.e. the state it
* would be in after `load()` resolves.
* - When the key is defined to a value, the value is applied for this
* field.
*
* This behavior is convenient for explicitly controlling current vs desired
* UI state in a single request, but it does make it a bit inconvenient to
* use `restore()` to reset a `<perspective-viewer>` to default as you must
* do so explicitly for each key; for this case, use `reset()` instead of
* restore.
*
* As noted in `save()`, this configuration state does not include the
* `Table` or its `Schema`. In order for `restore()` to work correctly, it
* must be called on a `<perspective-viewer>` that has a `Table already
* `load()`-ed, with the same (or a type-compatible superset) `Schema`.
* It does not need have the same rows, or even be populated.
*
* @category Persistence
* @param config returned by `save()`. This can be any format returned by
* `save()`; the specific deserialization is chosen by `typeof config`.
* @returns A promise which resolves when the changes have been applied and
* rendered.
* @example <caption>Restore a viewer from `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = localStorage.getItem("viewer_state");
* await viewer.restore(token);
* ```
*/
async restore(
config: PerspectiveViewerConfig | string | ArrayBuffer
): Promise<void> {
await this.load_wasm();
await this.instance.js_restore(config);
}
/**
* Serialize this element's attribute/interaction state, but _not_ the
* `perspective.Table` or its `Schema`. `save()` is designed to be used in
* conjunction with `restore()` to persist user settings and bookmarks, but
* the `PerspectiveViewerConfig` object returned in `json` format can also
* be written by hand quite easily, which is useful for authoring
* pre-conceived configs.
*
* @category Persistence
* @param format The serialization format - `json` (JavaScript object),
* `arraybuffer` or `string`. `restore()` uses the returned config's type
* to infer format.
* @returns a serialized element in the chosen format.
* @example <caption>Save a viewer to `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = await viewer.save("string");
* localStorage.setItem("viewer_state", token);
* ```
*/
async save(): Promise<PerspectiveViewerConfig>;
async save(format: "json"): Promise<PerspectiveViewerConfig>;
async save(format: "arraybuffer"): Promise<ArrayBuffer>;
async save(format: "string"): Promise<string>;
async save(
format?: "json" | "arraybuffer" | "string"
): Promise<PerspectiveViewerConfig | string | ArrayBuffer> {
await this.load_wasm();
const config = await this.instance.js_save(format);
return config;
}
/**
* Flush any pending modifications to this `<perspective-viewer>`. Since
* `<perspective-viewer>`'s API is almost entirely `async`, it may take
* some milliseconds before any method call such as `restore()` affects
* the rendered element. If you want to make sure any invoked method which
* affects the rendered has had its results rendered, call and await
* `flush()`
*
* @category Util
* @returns {Promise<void>} A promise which resolves when the current
* pending state changes have been applied and rendered.
* @example <caption>Flush an unawaited `restore()`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.restore({group_by: ["State"]});
* await viewer.flush();
* console.log("Viewer has been rendered with a pivot!");
* ```
*/
async flush(): Promise<void> {
await this.load_wasm();
await this.instance.js_flush();
}
/**
* Reset's this element's view state and attributes to default. Does not
* delete this element's `perspective.table` or otherwise modify the data
* state.
*
* @category Persistence
* @param all Should `expressions` param be reset as well, defaults to
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.reset();
* ```
*/
async reset(all = false): Promise<void> {
await this.load_wasm();
await this.instance.js_reset(all);
}
/**
* Deletes this element and clears it's internal state (but not its
* user state). This (or the underlying `perspective.view`'s equivalent
* method) must be called in order for its memory to be reclaimed, as well
* as the reciprocal method on the `perspective.table` which this viewer is
* bound to.
*
* @category Util
*/
async delete(): Promise<void> {
await this.load_wasm();
await this.instance.js_delete();
}
/**
* Download this element's data as a CSV file.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
*/
async download(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_download(flat);
}
/**
* Copies this element's view data (as a CSV) to the clipboard. This method
* must be called from an event handler, subject to the browser's
* restrictions on clipboard access. See
* {@link https://www.w3.org/TR/clipboard-apis/#allow-read-clipboard}.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const button = document.querySelector("button");
* button.addEventListener("click", async () => {
* await viewer.copy();
* });
* ```
*/
async copy(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_copy(flat);
}
/**
* Restyles the elements and to pick up any style changes. While most of
* perspective styling is plain CSS and can be updated at any time, some
* CSS rules are read and cached, e.g. the series colors for
* `@finos/perspective-viewer-d3fc` which are read from CSS then reapplied
* as SVG and Canvas attributes.
*
* @category Util
*/
async restyleElement(): Promise<void> {
await this.load_wasm();
await this.instance.js_restyle_element();
}
/**
* Sets the theme names available via the `<perspective-viewer>` status bar
* UI. Typically these will be auto-detected simply by including the
* theme `.css` in a `<link>` tag; however, auto-detection can fail if
* the `<link>` tag is not a same-origin request due to CORS. For servers
* configured to allow cross-origin requests, you can use the
* [`crossorigin` attribute](https://html.spec.whatwg.org/multipage/semantics.html#attr-link-crossorigin)
* to enable detection, e.g. `<link crossorigin="anonymous" .. >`. If for
* whatever reason auto-detection still fails, you may set the themes via
* this method. Note the theme `.css` must still be loaded in this case -
* the `resetThemes()` method only lets the `<perspective-viewer>` know what
* theme names are available.
*
* @category Util
* @param themes A list of theme names to use, or auto-detect from
* document's stylesheets if `undefined`.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.resetThemes(["Material Light", "Material Dark"]);
* ```
*/
async resetThemes(themes?: Array<string>): Promise<void> {
await this.load_wasm();
await this.instance.js_reset_themes(themes);
}
/**
* Gets the edit port, the port number for which `Table` updates from this
* `<perspective-viewer>` are generated. This port number will be present
* in the options object for a `View.on_update()` callback for any update
* which was originated by the `<perspective-viewer>`/user, which can be
* used to distinguish server-originated updates from user edits.
*
* @category Util
* @returns A promise which resolves to the current edit port.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const editport = await viewer.getEditPort();
* const table = await viewer.getTable();
* const view = await table.view();
* view.on_update(obj => {
* if (obj.port_id = editport) {
* console.log("User edit detected");
* }
* });
* ```
*/
async getEditPort(): Promise<number> {
await this.load_wasm();
const port = await this.instance.js_get_edit_port();
return port;
}
/**
* Determines the render throttling behavior. Can be an integer, for
* millisecond window to throttle render event; or, if `undefined`,
* will try to determine the optimal throttle time from this component's
* render framerate.
*
* @category Util
* @param value an optional throttle rate in milliseconds (integer). If not
* supplied, adaptive throttling is calculated from the average plugin
* render time.
* @example <caption>Limit FPS to 1 frame per second</caption>
* ```javascript
* await viewer.setThrottle(1000);
* ```
*/
async setThrottle(value?: number): Promise<void> {
await this.load_wasm();
await this.instance.js_set_throttle(value);
}
/**
* Opens/closes the element's config menu, equivalent to clicking the
* settings button in the UI. This method is equivalent to
* `viewer.restore({settings: force})` when `force` is present, but
* `restore()` cannot toggle as `toggleConfig()` can, you would need to
* first read the settings state from `save()` otherwise.
*
* Calling `toggleConfig()` may be delayed if an async render is currently
* in process, and it may only partially render the UI if `load()` has not
* yet resolved.
*
* @category UI Action
* @param force If supplied, explicitly set the config state to "open"
* (`true`) or "closed" (`false`).
* @example
* ```javascript
* await viewer.toggleConfig();
* ```
*/
async toggleConfig(force?: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_toggle_config(force);
}
/**
* Get the currently active plugin custom element instance, or a specific
* named instance if requested. `getPlugin(name)` does not activate the
* plugin requested, so if this plugin is not active the returned
* `HTMLElement` will not have a `parentElement`.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getPlugin()` will cause `perspective-viewer-plugin` to be registered as
* a side effect.
*
* @category Plugin
* @param name Optionally a specific plugin name, defaulting to the current
* active plugin.
* @returns The active or requested plugin instance.
*/
async getPlugin(name?: string): Promise<HTMLElement> {
await this.load_wasm();
const plugin = await this.instance.js_get_plugin(name);
return plugin;
}
/**
* Get all plugin custom element instances, in order of registration.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getAllPlugins()` will cause `perspective-viewer-plugin` to be registered
* as a side effect.
*
* @category Plugin
* @returns An `Array` of the plugin instances for this
* `<perspective-viewer>`.
*/
async getAllPlugins(): Promise<Array<HTMLElement>> {
await this.load_wasm();
const plugins = await this.instance.js_get_all_plugins();
return plugins;
}
/**
* Get the raw pointer to this `<perspective-viewer>` WASM model, such that
* it may be passed back to WASM function calls that take a
* `PerspectiveViewerElement` as an argument.
*
* @category Internal
* @returns A pointer to this model
*/
async unsafe_get_model(): Promise<number> {
await this.load_wasm();
return await this.instance.js_unsafe_get_model();
}
/**
* Get metadata for ExprTK's supported commands.
*
* @category Internal
* @returns An array of JSON descriptors for ExprTK commands
*/
static async getExprtkCommands(): Promise<Array<Record<string, string>>> {
await WASM_MODULE;
return get_exprtk_commands();
}
}
if (document.createElement("perspective-viewer").constructor === HTMLElement) {
window.customElements.define(
"perspective-viewer",
HTMLPerspectiveViewerElement
);
}
| {
this.instance = new PerspectiveViewerElement(this);
} | conditional_block |
viewer.ts | /******************************************************************************
*
* Copyright (c) 2018, the Perspective Authors.
*
* This file is part of the Perspective library, distributed under the terms
* of the Apache License 2.0. The full license can be found in the LICENSE
* file.
*
*/
import type * as perspective from "@finos/perspective";
import {
PerspectiveViewerElement,
register_plugin,
get_exprtk_commands,
} from "@finos/perspective-viewer/dist/pkg/perspective_viewer.js";
import {WASM_MODULE} from "./init";
export type PerspectiveViewerConfig = perspective.ViewConfig & {
plugin?: string;
settings?: boolean;
plugin_config?: any;
};
/**
* The Custom Elements implementation for `<perspective-viewer>`, as well at its
* API. `PerspectiveViewerElement` should not be constructed directly (like its
* parent class `HTMLElement`); instead, use `document.createElement()` or
* declare your `<perspective-viewer>` element in HTML. Once instantiated,
* `<perspective-viewer>` works just like a standard `HTMLElement`, with a few
* extra perspective-specific methods.
*
* @example
* ```javascript
* const viewer = document.createElement("perspective-viewer");
* ```
* @example
* ```javascript
* document.body.innerHTML = `
* <perspective-viewer id="viewer"></perspective-viewer>
* `;
* const viewer = document.body.querySelector("#viewer");
* ```
* @noInheritDoc
*/
export class HTMLPerspectiveViewerElement extends HTMLElement {
private instance: PerspectiveViewerElement;
/**
* Should not be called directly (will throw `TypeError: Illegal
* constructor`).
*
* @ignore
*/
constructor() {
super();
this.load_wasm();
}
private async load_wasm(): Promise<void> {
await WASM_MODULE;
if (!this.instance) {
this.instance = new PerspectiveViewerElement(this);
}
}
/**
* Part of the Custom Elements API. This method is called by the browser,
* and should not be called directly by applications.
*
* @ignore
*/
async connectedCallback(): Promise<void> {
await this.load_wasm(); | }
/**
* Register a new plugin via its custom element name. This method is called
* automatically as a side effect of importing a plugin module, so this
* method should only typically be called by plugin authors.
*
* @category Plugin
* @param name The `name` of the custom element to register, as supplied
* to the `customElements.define(name)` method.
* @example
* ```javascript
* customElements.get("perspective-viewer").registerPlugin("my-plugin");
* ```
*/
static async registerPlugin(name: string): Promise<void> {
await WASM_MODULE;
register_plugin(name);
}
/**
* Load a `perspective.Table`. If `load` or `update` have already been
* called on this element, its internal `perspective.Table` will _not_ be
* deleted, but it will bed de-referenced by this `<perspective-viewer>`.
*
* @category Data
* @param data A `Promise` which resolves to the `perspective.Table`
* @returns {Promise<void>} A promise which resolves once the data is
* loaded, a `perspective.View` has been created, and the active plugin has
* rendered.
* @example <caption>Load perspective.table</caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
* @example <caption>Load Promise<perspective.table></caption>
* ```javascript
* const my_viewer = document.getElementById('#my_viewer');
* const tbl = perspective.table("x,y\n1,a\n2,b");
* my_viewer.load(tbl);
* ```
*/
async load(
table: Promise<perspective.Table> | perspective.Table
): Promise<void> {
await this.load_wasm();
await this.instance.js_load(Promise.resolve(table));
}
/**
* Redraw this `<perspective-viewer>` and plugin when its dimensions or
* visibility has been updated. By default, `<perspective-viewer>` will
* auto-size when its own dimensions change, so this method need not be
* called; when disabled via `setAutoSize(false)` however, this method
* _must_ be called, and will not respond to dimension or style changes to
* its parent container otherwise. `notifyResize()` does not recalculate
* the current `View`, but all plugins will re-request the data window
* (which itself may be smaller or larger due to resize).
*
* @category Util
* @param force Whether to re-render, even if the dimenions have not
* changed. When set to `false` and auto-size is enabled (the defaults),
* calling this method will automatically disable auto-size.
* @returns A `Promise<void>` which resolves when this resize event has
* finished rendering.
* @example <caption>Bind `notfyResize()` to browser dimensions</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.setAutoSize(false);
* window.addEventListener("resize", () => viewer.notifyResize());
* ```
*/
async notifyResize(force = false): Promise<void> {
await this.load_wasm();
await this.instance.js_resize(force);
}
/**
* Determines the auto-size behavior. When `true` (the default), this
* element will re-render itself whenever its own dimensions change,
* utilizing a `ResizeObserver`; when `false`, you must explicitly call
* `notifyResize()` when the element's dimensions have changed.
*
* @category Util
* @param autosize Whether to re-render when this element's dimensions
* change.
* @example <caption>Disable auto-size</caption>
* ```javascript
* await viewer.setAutoSize(false);
* ```
*/
async setAutoSize(autosize = true): Promise<void> {
await this.load_wasm();
await this.instance.js_set_auto_size(autosize);
}
/**
* Returns the `perspective.Table()` which was supplied to `load()`
*
* @category Data
* @param wait_for_table Whether to await `load()` if it has not yet been
* invoked, or fail immediately.
* @returns A `Promise` which resolves to a `perspective.Table`
* @example <caption>Share a `Table`</caption>
* ```javascript
* const viewers = document.querySelectorAll("perspective-viewer");
* const [viewer1, viewer2] = Array.from(viewers);
* const table = await viewer1.getTable();
* await viewer2.load(table);
* ```
*/
async getTable(wait_for_table?: boolean): Promise<perspective.Table> {
await this.load_wasm();
const table = await this.instance.js_get_table(!!wait_for_table);
return table;
}
/**
* Returns the underlying `perspective.View` currently configured for this
* `<perspective-viewer>`. Because ownership of the `perspective.View` is
* mainainted by the `<perspective-viewer>` it was created by, this `View`
* may become deleted (invalidated by calling `delete()`) at any time -
* specifically, it will be deleted whenever the `PerspectiveViewConfig`
* changes. Because of this, when using this API, prefer calling
* `getView()` repeatedly over caching the returned `perspective.View`,
* especially in `async` contexts.
*
* @category Data
* @returns A `Promise` which ressolves to a `perspective.View`.
* @example <caption>Collapse grid to root</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const view = await viewer.getView();
* await view.set_depth(0);
* ```
*/
async getView(): Promise<perspective.View> {
await this.load_wasm();
const view = await this.instance.js_get_view();
return view;
}
/**
* Restore this element to a state as generated by a reciprocal call to
* `save`. In `json` (default) format, `PerspectiveViewerConfig`'s fields
* have specific semantics:
*
* - When a key is missing, this field is ignored; `<perspective-viewer>`
* will maintain whatever settings for this field is currently applied.
* - When the key is supplied, but the value is `undefined`, the field is
* reset to its default value for this current `View`, i.e. the state it
* would be in after `load()` resolves.
* - When the key is defined to a value, the value is applied for this
* field.
*
* This behavior is convenient for explicitly controlling current vs desired
* UI state in a single request, but it does make it a bit inconvenient to
* use `restore()` to reset a `<perspective-viewer>` to default as you must
* do so explicitly for each key; for this case, use `reset()` instead of
* restore.
*
* As noted in `save()`, this configuration state does not include the
* `Table` or its `Schema`. In order for `restore()` to work correctly, it
* must be called on a `<perspective-viewer>` that has a `Table already
* `load()`-ed, with the same (or a type-compatible superset) `Schema`.
* It does not need have the same rows, or even be populated.
*
* @category Persistence
* @param config returned by `save()`. This can be any format returned by
* `save()`; the specific deserialization is chosen by `typeof config`.
* @returns A promise which resolves when the changes have been applied and
* rendered.
* @example <caption>Restore a viewer from `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = localStorage.getItem("viewer_state");
* await viewer.restore(token);
* ```
*/
async restore(
config: PerspectiveViewerConfig | string | ArrayBuffer
): Promise<void> {
await this.load_wasm();
await this.instance.js_restore(config);
}
/**
* Serialize this element's attribute/interaction state, but _not_ the
* `perspective.Table` or its `Schema`. `save()` is designed to be used in
* conjunction with `restore()` to persist user settings and bookmarks, but
* the `PerspectiveViewerConfig` object returned in `json` format can also
* be written by hand quite easily, which is useful for authoring
* pre-conceived configs.
*
* @category Persistence
* @param format The serialization format - `json` (JavaScript object),
* `arraybuffer` or `string`. `restore()` uses the returned config's type
* to infer format.
* @returns a serialized element in the chosen format.
* @example <caption>Save a viewer to `localStorage`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const token = await viewer.save("string");
* localStorage.setItem("viewer_state", token);
* ```
*/
async save(): Promise<PerspectiveViewerConfig>;
async save(format: "json"): Promise<PerspectiveViewerConfig>;
async save(format: "arraybuffer"): Promise<ArrayBuffer>;
async save(format: "string"): Promise<string>;
async save(
format?: "json" | "arraybuffer" | "string"
): Promise<PerspectiveViewerConfig | string | ArrayBuffer> {
await this.load_wasm();
const config = await this.instance.js_save(format);
return config;
}
/**
* Flush any pending modifications to this `<perspective-viewer>`. Since
* `<perspective-viewer>`'s API is almost entirely `async`, it may take
* some milliseconds before any method call such as `restore()` affects
* the rendered element. If you want to make sure any invoked method which
* affects the rendered has had its results rendered, call and await
* `flush()`
*
* @category Util
* @returns {Promise<void>} A promise which resolves when the current
* pending state changes have been applied and rendered.
* @example <caption>Flush an unawaited `restore()`</caption>
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* viewer.restore({group_by: ["State"]});
* await viewer.flush();
* console.log("Viewer has been rendered with a pivot!");
* ```
*/
async flush(): Promise<void> {
await this.load_wasm();
await this.instance.js_flush();
}
/**
* Reset's this element's view state and attributes to default. Does not
* delete this element's `perspective.table` or otherwise modify the data
* state.
*
* @category Persistence
* @param all Should `expressions` param be reset as well, defaults to
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.reset();
* ```
*/
async reset(all = false): Promise<void> {
await this.load_wasm();
await this.instance.js_reset(all);
}
/**
* Deletes this element and clears it's internal state (but not its
* user state). This (or the underlying `perspective.view`'s equivalent
* method) must be called in order for its memory to be reclaimed, as well
* as the reciprocal method on the `perspective.table` which this viewer is
* bound to.
*
* @category Util
*/
async delete(): Promise<void> {
await this.load_wasm();
await this.instance.js_delete();
}
/**
* Download this element's data as a CSV file.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
*/
async download(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_download(flat);
}
/**
* Copies this element's view data (as a CSV) to the clipboard. This method
* must be called from an event handler, subject to the browser's
* restrictions on clipboard access. See
* {@link https://www.w3.org/TR/clipboard-apis/#allow-read-clipboard}.
*
* @category UI Action
* @param flat Whether to use the element's current view
* config, or to use a default "flat" view.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const button = document.querySelector("button");
* button.addEventListener("click", async () => {
* await viewer.copy();
* });
* ```
*/
async copy(flat: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_copy(flat);
}
/**
* Restyles the elements and to pick up any style changes. While most of
* perspective styling is plain CSS and can be updated at any time, some
* CSS rules are read and cached, e.g. the series colors for
* `@finos/perspective-viewer-d3fc` which are read from CSS then reapplied
* as SVG and Canvas attributes.
*
* @category Util
*/
async restyleElement(): Promise<void> {
await this.load_wasm();
await this.instance.js_restyle_element();
}
/**
* Sets the theme names available via the `<perspective-viewer>` status bar
* UI. Typically these will be auto-detected simply by including the
* theme `.css` in a `<link>` tag; however, auto-detection can fail if
* the `<link>` tag is not a same-origin request due to CORS. For servers
* configured to allow cross-origin requests, you can use the
* [`crossorigin` attribute](https://html.spec.whatwg.org/multipage/semantics.html#attr-link-crossorigin)
* to enable detection, e.g. `<link crossorigin="anonymous" .. >`. If for
* whatever reason auto-detection still fails, you may set the themes via
* this method. Note the theme `.css` must still be loaded in this case -
* the `resetThemes()` method only lets the `<perspective-viewer>` know what
* theme names are available.
*
* @category Util
* @param themes A list of theme names to use, or auto-detect from
* document's stylesheets if `undefined`.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* await viewer.resetThemes(["Material Light", "Material Dark"]);
* ```
*/
async resetThemes(themes?: Array<string>): Promise<void> {
await this.load_wasm();
await this.instance.js_reset_themes(themes);
}
/**
* Gets the edit port, the port number for which `Table` updates from this
* `<perspective-viewer>` are generated. This port number will be present
* in the options object for a `View.on_update()` callback for any update
* which was originated by the `<perspective-viewer>`/user, which can be
* used to distinguish server-originated updates from user edits.
*
* @category Util
* @returns A promise which resolves to the current edit port.
* @example
* ```javascript
* const viewer = document.querySelector("perspective-viewer");
* const editport = await viewer.getEditPort();
* const table = await viewer.getTable();
* const view = await table.view();
* view.on_update(obj => {
* if (obj.port_id = editport) {
* console.log("User edit detected");
* }
* });
* ```
*/
async getEditPort(): Promise<number> {
await this.load_wasm();
const port = await this.instance.js_get_edit_port();
return port;
}
/**
* Determines the render throttling behavior. Can be an integer, for
* millisecond window to throttle render event; or, if `undefined`,
* will try to determine the optimal throttle time from this component's
* render framerate.
*
* @category Util
* @param value an optional throttle rate in milliseconds (integer). If not
* supplied, adaptive throttling is calculated from the average plugin
* render time.
* @example <caption>Limit FPS to 1 frame per second</caption>
* ```javascript
* await viewer.setThrottle(1000);
* ```
*/
async setThrottle(value?: number): Promise<void> {
await this.load_wasm();
await this.instance.js_set_throttle(value);
}
/**
* Opens/closes the element's config menu, equivalent to clicking the
* settings button in the UI. This method is equivalent to
* `viewer.restore({settings: force})` when `force` is present, but
* `restore()` cannot toggle as `toggleConfig()` can, you would need to
* first read the settings state from `save()` otherwise.
*
* Calling `toggleConfig()` may be delayed if an async render is currently
* in process, and it may only partially render the UI if `load()` has not
* yet resolved.
*
* @category UI Action
* @param force If supplied, explicitly set the config state to "open"
* (`true`) or "closed" (`false`).
* @example
* ```javascript
* await viewer.toggleConfig();
* ```
*/
async toggleConfig(force?: boolean): Promise<void> {
await this.load_wasm();
await this.instance.js_toggle_config(force);
}
/**
* Get the currently active plugin custom element instance, or a specific
* named instance if requested. `getPlugin(name)` does not activate the
* plugin requested, so if this plugin is not active the returned
* `HTMLElement` will not have a `parentElement`.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getPlugin()` will cause `perspective-viewer-plugin` to be registered as
* a side effect.
*
* @category Plugin
* @param name Optionally a specific plugin name, defaulting to the current
* active plugin.
* @returns The active or requested plugin instance.
*/
async getPlugin(name?: string): Promise<HTMLElement> {
await this.load_wasm();
const plugin = await this.instance.js_get_plugin(name);
return plugin;
}
/**
* Get all plugin custom element instances, in order of registration.
*
* If no plugins have been registered (via `registerPlugin()`), calling
* `getAllPlugins()` will cause `perspective-viewer-plugin` to be registered
* as a side effect.
*
* @category Plugin
* @returns An `Array` of the plugin instances for this
* `<perspective-viewer>`.
*/
async getAllPlugins(): Promise<Array<HTMLElement>> {
await this.load_wasm();
const plugins = await this.instance.js_get_all_plugins();
return plugins;
}
/**
* Get the raw pointer to this `<perspective-viewer>` WASM model, such that
* it may be passed back to WASM function calls that take a
* `PerspectiveViewerElement` as an argument.
*
* @category Internal
* @returns A pointer to this model
*/
async unsafe_get_model(): Promise<number> {
await this.load_wasm();
return await this.instance.js_unsafe_get_model();
}
/**
* Get metadata for ExprTK's supported commands.
*
* @category Internal
* @returns An array of JSON descriptors for ExprTK commands
*/
static async getExprtkCommands(): Promise<Array<Record<string, string>>> {
await WASM_MODULE;
return get_exprtk_commands();
}
}
if (document.createElement("perspective-viewer").constructor === HTMLElement) {
window.customElements.define(
"perspective-viewer",
HTMLPerspectiveViewerElement
);
} | this.instance.connected_callback(); | random_line_split |
Toys.py | from datetime import datetime, timedelta
import json
import urllib.request
from enum import Enum
from random import randint, sample, choices
from typing import Optional
from discord.ext import commands, tasks
from discord import Embed, TextChannel
from Code.Cogs.Base import ConfiguredCog
from Code.Data import DataAccess
from Code.Base.Parsing import DiceLexer, DiceParser
class CookieHuntSugarOptions(Enum):
"""An enum listing out all the available sugar command options."""
HIGH = 'high'
class CookieHuntTarget(Enum):
"""An enum listing out all the available target options."""
CLAIMER = 'claimer'
LEADER = 'leader'
class CookieHuntCog(ConfiguredCog):
"""A class supporting the "Cookie Hunt" feature, including the `gimme` and `sugar` commands."""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task.
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
# Init instance vars
self.cookie_data = self._parse_cookie_data()
self.cookie_available = False
self.cookie_prepared_timestamp = None
self.cookie_drop_delay_hours = None
self.cookie_drop_delay_minutes = None
self.cookie_type = None
@commands.command()
async def gimme(self, ctx: commands.Context):
"""The origin point for the `gimme` command. Claims a cookie for the calling user if one has been dropped, and
resets the points for all if the goal was reached.
:param ctx: The command context.
"""
if not self.cookie_available:
# No cookie available message
await ctx.send('There is no cookie available right now. Sorry!')
return
# Write down the pertinent information for the drop since it's about to get wiped
cookie_type = self.cookie_type
# Mark that we got the cookie so no one else takes it (and prepare the next one)
self._prep_cookie_drop()
# Find the target's ID
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
target_discord_id = ctx.author.id
elif cookie_type['target'] == CookieHuntTarget.LEADER:
target_discord_id = DataAccess.get_top_cookie_collectors(1)[0].Discord_Id
else:
# Invalid target, just assume it's the claimer
target_discord_id = ctx.author.id
# Award points as needed
db_user_id = DataAccess.find_user_id_by_discord_id(target_discord_id)
cookie_count = DataAccess.modify_cookie_count(db_user_id, cookie_type['modifier'])
# check if goal was reached by the claimer
cookie_goal = ConfiguredCog.config['content']['cookie_hunt_goal']
if cookie_count >= cookie_goal:
# announce winner
await ctx.send(f'Oh my, it looks like {ctx.author.name} is the cookie monster!')
# Award the role
winner_role_name = ConfiguredCog.config['content']['cookie_hunt_winner_role']
role = self.find_role_in_guild(winner_role_name, ctx.guild)
if role:
# Remove role from all users
for member in ctx.guild.members:
if role in member.roles:
await member.remove_roles(role, reason='No longer the cookie hunt winner.')
# Give the role to the winner
if not self.member_contains_role(role.name, ctx.author):
await ctx.author.add_roles(role, reason=f'First to grab {cookie_goal} cookies.')
# reset cookie counts
DataAccess.reset_all_cookies()
else:
# Figure out proper grammar
if cookie_count == 1:
cookie_grammar_word = 'cookie'
else:
cookie_grammar_word = 'cookies'
# Send a message saying they got the cookie
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'They now have {cookie_count} {cookie_grammar_word}.')
else:
target_user = self.bot.get_user(int(target_discord_id))
if target_user:
target_user_name = target_user.name
else:
target_user_name = f'Unknown ({target_discord_id})'
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'The leader, {target_user_name}, now has {cookie_count} {cookie_grammar_word}.')
@commands.command()
async def sugar(self, ctx: commands.Context, options: str = None):
"""The origin point for the `sugar` command. Shows relevant cookie count scores based on the options provided.
:param ctx: The command context.
:param options: The (optional) parameters for the sugar command, as enumerated by the
`CookieHuntSugarOptions` enumeration.
"""
if options is not None:
if options.lower() == CookieHuntSugarOptions.HIGH.value:
# Get the high scores
top_collectors = DataAccess.get_top_cookie_collectors(3)
# convert IDs to nicknames and display them
collectors_displayed = False
embed = None
for Discord_Id, Cookie_Count in top_collectors:
if not collectors_displayed:
# Only build the embed the first time through the loop
embed = Embed(title='Top Cookie Collectors',
color=ConfiguredCog.convert_color('#8a4b38'))
collectors_displayed = True
discord_user = self.bot.get_user(int(Discord_Id))
if discord_user:
user_name = discord_user.name
else:
user_name = f'Unknown ({Discord_Id})'
user_name = f'{user_name}:'
# Add field
embed.add_field(name=user_name, value=Cookie_Count, inline=False)
if collectors_displayed:
# We found collectors to display
await ctx.send(embed=embed)
else:
# Our query returned no results
await ctx.send('_No one has gotten any cookies yet!_')
else:
# Unknown option error
await ctx.send(f'Unknown command `{options}`, please re-enter your command and try again.')
else:
# Find cookie count for the user
cookie_count = DataAccess.get_cookie_count_by_discord_id(ctx.author.id)
# Figure out proper grammar
if cookie_count == 1:
cookie_word = 'cookie'
else:
cookie_word = 'cookies'
# Give the requesting user's score
await ctx.send(f'{ctx.author.name} has {cookie_count} {cookie_word}.')
@commands.command('forcedrop')
@commands.has_any_role(*ConfiguredCog.config['mod_roles'])
async def force_drop(self, ctx: commands.Context):
"""Forces a cookie to drop ahead of schedule.
:param ctx: The command context.
"""
await self._check_to_send_cookie(True)
@tasks.loop(minutes=1)
async def _check_to_send_cookie(self, force_drop: bool = False):
"""A looping task to check if a cookie needs to be sent. Checks a few parameters such as a randomized time
delay and whether there's already an available cookie to claim. If all the parameters have been met,
picks a random channel from a configured list and drops a cookie into that channel for claiming.
:param force_drop: Overrides any delays and force a cookie to drop immediately.
"""
# If random number isn't set, plan out a new cookie drop
if self.cookie_drop_delay_hours is None:
self._prep_cookie_drop()
# If current timestamp is after the logged timestamp + random number's hours, then drop a cookie in a
# random channel from the list of channels (assuming we can find the channels by name)
time_delta = datetime.now() - self.cookie_prepared_timestamp
if (force_drop or time_delta > timedelta(hours=self.cookie_drop_delay_hours,
minutes=self.cookie_drop_delay_minutes)) \
and not self.cookie_available:
self.logger.debug('Dropping a cookie.')
# Build the cookie drop message
prefix = ConfiguredCog.config['command_prefix']
color = ConfiguredCog.convert_color('#8a4b38')
cookie_drop_embed = Embed(color=color, title=':cookie:', description=f'Here, have a cookie! Use '
f'`{prefix}gimme` to take it!')
# Pick a random channel to send it to
channel = self._pick_random_channel_to_send()
if channel is not None:
self.cookie_available = True
await channel.send(embed=cookie_drop_embed)
else:
self.logger.error('No valid channels were found. Skipping drop.')
def cog_load(self) -> None:
"""Overridden from commands.Cog; starts the automated task."""
self._check_to_send_cookie.start()
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._check_to_send_cookie.cancel()
def _prep_cookie_drop(self):
"""Sets up the class's instance variables for a new cookie drop in the future."""
min_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][0]
max_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][1]
hour_delay = randint(min_hour, max_hour)
minute_delay = randint(0, 59) # Picks a random minute within the hour to drop it
cookie_type = choices(self.cookie_data, self._get_cookie_weights())[0]
self.logger.debug(f'Preparing a cookie drop for about {hour_delay} hours and {minute_delay} minutes from now.'
f'It is a {cookie_type["name"]} cookie.')
self.cookie_available = False
self.cookie_prepared_timestamp = datetime.now()
self.cookie_drop_delay_hours = hour_delay
self.cookie_drop_delay_minutes = minute_delay
self.cookie_type = cookie_type
@staticmethod
def _parse_cookie_data() -> dict:
"""Parses the cookie file out into its corresponding data
:return: The parsed json data from the necessary data file
"""
with open('Data/cookies.json') as cookie_data_file:
cookie_data_dict = json.load(cookie_data_file)
# Cast the necessary data
for cookie_type in cookie_data_dict:
cookie_type['weight'] = float(cookie_type['weight'])
cookie_type['target'] = CookieHuntTarget(cookie_type['target'])
return cookie_data_dict
def _get_cookie_weights(self) -> list:
"""Gets an arbitrarily ordered list of weights mapped to the cookie data dictionary.
:return: A list of weights.
"""
cookie_weights = []
for cookie_type in self.cookie_data:
cookie_weights.append(cookie_type['weight'])
return cookie_weights
def _pick_random_channel_to_send(self) -> Optional[TextChannel]:
"""Takes the preconfigured list of available channels that we can drop a cookie into, and returns a possible
one.
:return: The randomly selected channel to send a cookie to, or None if no valid options were found.
"""
# Shuffle the whole list of all the channels we can access, so that in case we can't find the first channel | # that we randomly picked, we move on to the next one safely.
random_channel_pick_list = sample(ConfiguredCog.config['content']['cookie_hunt_allowed_channels'],
len(ConfiguredCog.config['content']['cookie_hunt_allowed_channels']))
for selected_channel_name in random_channel_pick_list:
for channel in self.bot.get_all_channels():
if channel.name == selected_channel_name and isinstance(channel, TextChannel):
# Found a channel that matches the name in the config, therefore this is the random channel selected
return channel
# No valid channel options, return None
return None
class DiceRollerCog(ConfiguredCog):
"""A class supporting discord dice rolling features"""
@commands.command()
async def roll(self, ctx: commands.context, dice: str):
"""The origin point for the dice roll command.
:param ctx: The command context.
:param dice: The dice roll command to parse.
"""
if dice:
lexer = DiceLexer()
parser = DiceParser()
try:
step_data, result = parser.parse(lexer.tokenize(dice))
except TypeError:
await ctx.send("There was an error with your roll syntax. Please try again.")
return
if result.is_integer():
result = int(result)
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['dice_result_embed_color'])
title = f'Roll for {ctx.author.name}'
description = f'**Result:**\n' \
f'```\n' \
f'{result}\n' \
f'```\n' \
f'**Steps:**\n' \
f'```\n'
for step in step_data:
description += step + '\n'
description += '```'
embed = Embed(color=color, title=title, description=description)
await ctx.send(embed=embed)
@commands.command()
async def r(self, ctx: commands.context, text: str):
"""An alias for the `roll` method.
:param ctx: The command context.
:param text: The dice roll command to parse.
"""
return await self.roll(ctx, text)
class AutoDrawingPrompt(ConfiguredCog):
"""A class supporting the Drawing Prompt automatic posting functionality"""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
self.current_prompt = ''
@commands.Cog.listener()
async def on_ready(self):
"""Cog Listener to automatically run the task on start."""
await self._get_sketch_prompt()
async def cog_load(self):
"""Overridden from commands.Cog; starts the automated task."""
self._get_sketch_prompt.start()
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._get_sketch_prompt.cancel()
@staticmethod
def _get_neat_date(date: datetime) -> str:
"""Takes a datetime object and converts the day and month into a cleanly formatted string.
:param date: The datetime object to convert to a neat string
:return: The formatted month and day in the format `[Month] [Numeric Day][st|nd|rd|th]`
"""
month_selector = ["January", "February", "March", "April", "May", "June", "July", "August", "September",
"October", "November", "December"]
month_string = month_selector[date.month - 1]
day = date.day
if day == 1 or day == 21 or day == 31:
suffix = "st"
elif day == 2 or day == 22:
suffix = "nd"
elif day == 3 or day == 23:
suffix = "rd"
else:
suffix = "th"
neat_date = f"{month_string} {day}{suffix}"
return neat_date
def _get_daily_drawing_prompt(self) -> str:
"""Gets today's drawing prompt from reddit.com/r/SketchDaily, if it exists.
:return: The daily drawing prompt if there is one found for today; or an empty string if none for today was
found.
"""
site = urllib.request.urlopen(
urllib.request.Request("https://reddit.com/r/SketchDaily/new", headers={'User-Agent': 'Mozilla/5.0'}))
site_str = site.read().decode('utf-8')
# search for today's theme on the skd site
now = datetime.now()
neat_today_date = self._get_neat_date(now)
loc = site_str.find(neat_today_date + " - ")
# if we can't find today's theme, return a blank string
if loc == -1:
return ''
# FIND YESTERDAY'S THEME:
# yesterday = datetime.now() - timedelta(days=1)
# neat_today_date = self._get_neat_date(yesterday)
# loc = site_str.find(neat_today_date + " - ")
site_str = site_str[loc:]
site_str = site_str[:site_str.find('"')]
if len(site_str) > 100:
site_str = site_str[:100]
return site_str
@tasks.loop(minutes=30)
async def _get_sketch_prompt(self):
"""A looping task to query the web for today's sketch prompt and announce it in a given discord channel if it
was found. If today's prompt was already announced, or if the prompt for today wasn't found, nothing is
announced in the channel.
"""
drawing_prompt = self._get_daily_drawing_prompt()
if drawing_prompt == '':
# No drawing prompt found for today; don't do anything
return
elif not drawing_prompt == self.current_prompt:
# The prompt we pulled does not match what we found before, so post the new text.
for channel in self.bot.get_all_channels():
if channel.name == ConfiguredCog.config['content']['daily_prompt_channel'] \
and isinstance(channel, TextChannel):
# Build the prompt message
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['prompt_color'])
title = 'Prompt for today, courtesy of r/SketchDaily'
url = 'https://reddit.com/r/SketchDaily'
description = drawing_prompt
message = Embed(color=color, title=title, url=url, description=description)
# Send the message
await channel.send(embed=message)
# Note down that we found today's prompt (so as not to re-send it)
self.current_prompt = drawing_prompt
break | random_line_split | |
Toys.py | from datetime import datetime, timedelta
import json
import urllib.request
from enum import Enum
from random import randint, sample, choices
from typing import Optional
from discord.ext import commands, tasks
from discord import Embed, TextChannel
from Code.Cogs.Base import ConfiguredCog
from Code.Data import DataAccess
from Code.Base.Parsing import DiceLexer, DiceParser
class CookieHuntSugarOptions(Enum):
"""An enum listing out all the available sugar command options."""
HIGH = 'high'
class CookieHuntTarget(Enum):
"""An enum listing out all the available target options."""
CLAIMER = 'claimer'
LEADER = 'leader'
class CookieHuntCog(ConfiguredCog):
"""A class supporting the "Cookie Hunt" feature, including the `gimme` and `sugar` commands."""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task.
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
# Init instance vars
self.cookie_data = self._parse_cookie_data()
self.cookie_available = False
self.cookie_prepared_timestamp = None
self.cookie_drop_delay_hours = None
self.cookie_drop_delay_minutes = None
self.cookie_type = None
@commands.command()
async def gimme(self, ctx: commands.Context):
"""The origin point for the `gimme` command. Claims a cookie for the calling user if one has been dropped, and
resets the points for all if the goal was reached.
:param ctx: The command context.
"""
if not self.cookie_available:
# No cookie available message
await ctx.send('There is no cookie available right now. Sorry!')
return
# Write down the pertinent information for the drop since it's about to get wiped
cookie_type = self.cookie_type
# Mark that we got the cookie so no one else takes it (and prepare the next one)
self._prep_cookie_drop()
# Find the target's ID
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
target_discord_id = ctx.author.id
elif cookie_type['target'] == CookieHuntTarget.LEADER:
target_discord_id = DataAccess.get_top_cookie_collectors(1)[0].Discord_Id
else:
# Invalid target, just assume it's the claimer
target_discord_id = ctx.author.id
# Award points as needed
db_user_id = DataAccess.find_user_id_by_discord_id(target_discord_id)
cookie_count = DataAccess.modify_cookie_count(db_user_id, cookie_type['modifier'])
# check if goal was reached by the claimer
cookie_goal = ConfiguredCog.config['content']['cookie_hunt_goal']
if cookie_count >= cookie_goal:
# announce winner
await ctx.send(f'Oh my, it looks like {ctx.author.name} is the cookie monster!')
# Award the role
winner_role_name = ConfiguredCog.config['content']['cookie_hunt_winner_role']
role = self.find_role_in_guild(winner_role_name, ctx.guild)
if role:
# Remove role from all users
for member in ctx.guild.members:
if role in member.roles:
await member.remove_roles(role, reason='No longer the cookie hunt winner.')
# Give the role to the winner
if not self.member_contains_role(role.name, ctx.author):
await ctx.author.add_roles(role, reason=f'First to grab {cookie_goal} cookies.')
# reset cookie counts
DataAccess.reset_all_cookies()
else:
# Figure out proper grammar
if cookie_count == 1:
cookie_grammar_word = 'cookie'
else:
cookie_grammar_word = 'cookies'
# Send a message saying they got the cookie
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'They now have {cookie_count} {cookie_grammar_word}.')
else:
target_user = self.bot.get_user(int(target_discord_id))
if target_user:
target_user_name = target_user.name
else:
target_user_name = f'Unknown ({target_discord_id})'
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'The leader, {target_user_name}, now has {cookie_count} {cookie_grammar_word}.')
@commands.command()
async def sugar(self, ctx: commands.Context, options: str = None):
"""The origin point for the `sugar` command. Shows relevant cookie count scores based on the options provided.
:param ctx: The command context.
:param options: The (optional) parameters for the sugar command, as enumerated by the
`CookieHuntSugarOptions` enumeration.
"""
if options is not None:
if options.lower() == CookieHuntSugarOptions.HIGH.value:
# Get the high scores
top_collectors = DataAccess.get_top_cookie_collectors(3)
# convert IDs to nicknames and display them
collectors_displayed = False
embed = None
for Discord_Id, Cookie_Count in top_collectors:
if not collectors_displayed:
# Only build the embed the first time through the loop
|
discord_user = self.bot.get_user(int(Discord_Id))
if discord_user:
user_name = discord_user.name
else:
user_name = f'Unknown ({Discord_Id})'
user_name = f'{user_name}:'
# Add field
embed.add_field(name=user_name, value=Cookie_Count, inline=False)
if collectors_displayed:
# We found collectors to display
await ctx.send(embed=embed)
else:
# Our query returned no results
await ctx.send('_No one has gotten any cookies yet!_')
else:
# Unknown option error
await ctx.send(f'Unknown command `{options}`, please re-enter your command and try again.')
else:
# Find cookie count for the user
cookie_count = DataAccess.get_cookie_count_by_discord_id(ctx.author.id)
# Figure out proper grammar
if cookie_count == 1:
cookie_word = 'cookie'
else:
cookie_word = 'cookies'
# Give the requesting user's score
await ctx.send(f'{ctx.author.name} has {cookie_count} {cookie_word}.')
@commands.command('forcedrop')
@commands.has_any_role(*ConfiguredCog.config['mod_roles'])
async def force_drop(self, ctx: commands.Context):
"""Forces a cookie to drop ahead of schedule.
:param ctx: The command context.
"""
await self._check_to_send_cookie(True)
@tasks.loop(minutes=1)
async def _check_to_send_cookie(self, force_drop: bool = False):
"""A looping task to check if a cookie needs to be sent. Checks a few parameters such as a randomized time
delay and whether there's already an available cookie to claim. If all the parameters have been met,
picks a random channel from a configured list and drops a cookie into that channel for claiming.
:param force_drop: Overrides any delays and force a cookie to drop immediately.
"""
# If random number isn't set, plan out a new cookie drop
if self.cookie_drop_delay_hours is None:
self._prep_cookie_drop()
# If current timestamp is after the logged timestamp + random number's hours, then drop a cookie in a
# random channel from the list of channels (assuming we can find the channels by name)
time_delta = datetime.now() - self.cookie_prepared_timestamp
if (force_drop or time_delta > timedelta(hours=self.cookie_drop_delay_hours,
minutes=self.cookie_drop_delay_minutes)) \
and not self.cookie_available:
self.logger.debug('Dropping a cookie.')
# Build the cookie drop message
prefix = ConfiguredCog.config['command_prefix']
color = ConfiguredCog.convert_color('#8a4b38')
cookie_drop_embed = Embed(color=color, title=':cookie:', description=f'Here, have a cookie! Use '
f'`{prefix}gimme` to take it!')
# Pick a random channel to send it to
channel = self._pick_random_channel_to_send()
if channel is not None:
self.cookie_available = True
await channel.send(embed=cookie_drop_embed)
else:
self.logger.error('No valid channels were found. Skipping drop.')
def cog_load(self) -> None:
"""Overridden from commands.Cog; starts the automated task."""
self._check_to_send_cookie.start()
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._check_to_send_cookie.cancel()
def _prep_cookie_drop(self):
"""Sets up the class's instance variables for a new cookie drop in the future."""
min_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][0]
max_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][1]
hour_delay = randint(min_hour, max_hour)
minute_delay = randint(0, 59) # Picks a random minute within the hour to drop it
cookie_type = choices(self.cookie_data, self._get_cookie_weights())[0]
self.logger.debug(f'Preparing a cookie drop for about {hour_delay} hours and {minute_delay} minutes from now.'
f'It is a {cookie_type["name"]} cookie.')
self.cookie_available = False
self.cookie_prepared_timestamp = datetime.now()
self.cookie_drop_delay_hours = hour_delay
self.cookie_drop_delay_minutes = minute_delay
self.cookie_type = cookie_type
@staticmethod
def _parse_cookie_data() -> dict:
"""Parses the cookie file out into its corresponding data
:return: The parsed json data from the necessary data file
"""
with open('Data/cookies.json') as cookie_data_file:
cookie_data_dict = json.load(cookie_data_file)
# Cast the necessary data
for cookie_type in cookie_data_dict:
cookie_type['weight'] = float(cookie_type['weight'])
cookie_type['target'] = CookieHuntTarget(cookie_type['target'])
return cookie_data_dict
def _get_cookie_weights(self) -> list:
"""Gets an arbitrarily ordered list of weights mapped to the cookie data dictionary.
:return: A list of weights.
"""
cookie_weights = []
for cookie_type in self.cookie_data:
cookie_weights.append(cookie_type['weight'])
return cookie_weights
def _pick_random_channel_to_send(self) -> Optional[TextChannel]:
"""Takes the preconfigured list of available channels that we can drop a cookie into, and returns a possible
one.
:return: The randomly selected channel to send a cookie to, or None if no valid options were found.
"""
# Shuffle the whole list of all the channels we can access, so that in case we can't find the first channel
# that we randomly picked, we move on to the next one safely.
random_channel_pick_list = sample(ConfiguredCog.config['content']['cookie_hunt_allowed_channels'],
len(ConfiguredCog.config['content']['cookie_hunt_allowed_channels']))
for selected_channel_name in random_channel_pick_list:
for channel in self.bot.get_all_channels():
if channel.name == selected_channel_name and isinstance(channel, TextChannel):
# Found a channel that matches the name in the config, therefore this is the random channel selected
return channel
# No valid channel options, return None
return None
class DiceRollerCog(ConfiguredCog):
"""A class supporting discord dice rolling features"""
@commands.command()
async def roll(self, ctx: commands.context, dice: str):
"""The origin point for the dice roll command.
:param ctx: The command context.
:param dice: The dice roll command to parse.
"""
if dice:
lexer = DiceLexer()
parser = DiceParser()
try:
step_data, result = parser.parse(lexer.tokenize(dice))
except TypeError:
await ctx.send("There was an error with your roll syntax. Please try again.")
return
if result.is_integer():
result = int(result)
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['dice_result_embed_color'])
title = f'Roll for {ctx.author.name}'
description = f'**Result:**\n' \
f'```\n' \
f'{result}\n' \
f'```\n' \
f'**Steps:**\n' \
f'```\n'
for step in step_data:
description += step + '\n'
description += '```'
embed = Embed(color=color, title=title, description=description)
await ctx.send(embed=embed)
@commands.command()
async def r(self, ctx: commands.context, text: str):
"""An alias for the `roll` method.
:param ctx: The command context.
:param text: The dice roll command to parse.
"""
return await self.roll(ctx, text)
class AutoDrawingPrompt(ConfiguredCog):
"""A class supporting the Drawing Prompt automatic posting functionality"""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
self.current_prompt = ''
@commands.Cog.listener()
async def on_ready(self):
"""Cog Listener to automatically run the task on start."""
await self._get_sketch_prompt()
async def cog_load(self):
"""Overridden from commands.Cog; starts the automated task."""
self._get_sketch_prompt.start()
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._get_sketch_prompt.cancel()
@staticmethod
def _get_neat_date(date: datetime) -> str:
"""Takes a datetime object and converts the day and month into a cleanly formatted string.
:param date: The datetime object to convert to a neat string
:return: The formatted month and day in the format `[Month] [Numeric Day][st|nd|rd|th]`
"""
month_selector = ["January", "February", "March", "April", "May", "June", "July", "August", "September",
"October", "November", "December"]
month_string = month_selector[date.month - 1]
day = date.day
if day == 1 or day == 21 or day == 31:
suffix = "st"
elif day == 2 or day == 22:
suffix = "nd"
elif day == 3 or day == 23:
suffix = "rd"
else:
suffix = "th"
neat_date = f"{month_string} {day}{suffix}"
return neat_date
def _get_daily_drawing_prompt(self) -> str:
"""Gets today's drawing prompt from reddit.com/r/SketchDaily, if it exists.
:return: The daily drawing prompt if there is one found for today; or an empty string if none for today was
found.
"""
site = urllib.request.urlopen(
urllib.request.Request("https://reddit.com/r/SketchDaily/new", headers={'User-Agent': 'Mozilla/5.0'}))
site_str = site.read().decode('utf-8')
# search for today's theme on the skd site
now = datetime.now()
neat_today_date = self._get_neat_date(now)
loc = site_str.find(neat_today_date + " - ")
# if we can't find today's theme, return a blank string
if loc == -1:
return ''
# FIND YESTERDAY'S THEME:
# yesterday = datetime.now() - timedelta(days=1)
# neat_today_date = self._get_neat_date(yesterday)
# loc = site_str.find(neat_today_date + " - ")
site_str = site_str[loc:]
site_str = site_str[:site_str.find('"')]
if len(site_str) > 100:
site_str = site_str[:100]
return site_str
@tasks.loop(minutes=30)
async def _get_sketch_prompt(self):
"""A looping task to query the web for today's sketch prompt and announce it in a given discord channel if it
was found. If today's prompt was already announced, or if the prompt for today wasn't found, nothing is
announced in the channel.
"""
drawing_prompt = self._get_daily_drawing_prompt()
if drawing_prompt == '':
# No drawing prompt found for today; don't do anything
return
elif not drawing_prompt == self.current_prompt:
# The prompt we pulled does not match what we found before, so post the new text.
for channel in self.bot.get_all_channels():
if channel.name == ConfiguredCog.config['content']['daily_prompt_channel'] \
and isinstance(channel, TextChannel):
# Build the prompt message
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['prompt_color'])
title = 'Prompt for today, courtesy of r/SketchDaily'
url = 'https://reddit.com/r/SketchDaily'
description = drawing_prompt
message = Embed(color=color, title=title, url=url, description=description)
# Send the message
await channel.send(embed=message)
# Note down that we found today's prompt (so as not to re-send it)
self.current_prompt = drawing_prompt
break
| embed = Embed(title='Top Cookie Collectors',
color=ConfiguredCog.convert_color('#8a4b38'))
collectors_displayed = True | conditional_block |
Toys.py | from datetime import datetime, timedelta
import json
import urllib.request
from enum import Enum
from random import randint, sample, choices
from typing import Optional
from discord.ext import commands, tasks
from discord import Embed, TextChannel
from Code.Cogs.Base import ConfiguredCog
from Code.Data import DataAccess
from Code.Base.Parsing import DiceLexer, DiceParser
class CookieHuntSugarOptions(Enum):
"""An enum listing out all the available sugar command options."""
HIGH = 'high'
class CookieHuntTarget(Enum):
"""An enum listing out all the available target options."""
CLAIMER = 'claimer'
LEADER = 'leader'
class CookieHuntCog(ConfiguredCog):
"""A class supporting the "Cookie Hunt" feature, including the `gimme` and `sugar` commands."""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task.
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
# Init instance vars
self.cookie_data = self._parse_cookie_data()
self.cookie_available = False
self.cookie_prepared_timestamp = None
self.cookie_drop_delay_hours = None
self.cookie_drop_delay_minutes = None
self.cookie_type = None
@commands.command()
async def gimme(self, ctx: commands.Context):
"""The origin point for the `gimme` command. Claims a cookie for the calling user if one has been dropped, and
resets the points for all if the goal was reached.
:param ctx: The command context.
"""
if not self.cookie_available:
# No cookie available message
await ctx.send('There is no cookie available right now. Sorry!')
return
# Write down the pertinent information for the drop since it's about to get wiped
cookie_type = self.cookie_type
# Mark that we got the cookie so no one else takes it (and prepare the next one)
self._prep_cookie_drop()
# Find the target's ID
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
target_discord_id = ctx.author.id
elif cookie_type['target'] == CookieHuntTarget.LEADER:
target_discord_id = DataAccess.get_top_cookie_collectors(1)[0].Discord_Id
else:
# Invalid target, just assume it's the claimer
target_discord_id = ctx.author.id
# Award points as needed
db_user_id = DataAccess.find_user_id_by_discord_id(target_discord_id)
cookie_count = DataAccess.modify_cookie_count(db_user_id, cookie_type['modifier'])
# check if goal was reached by the claimer
cookie_goal = ConfiguredCog.config['content']['cookie_hunt_goal']
if cookie_count >= cookie_goal:
# announce winner
await ctx.send(f'Oh my, it looks like {ctx.author.name} is the cookie monster!')
# Award the role
winner_role_name = ConfiguredCog.config['content']['cookie_hunt_winner_role']
role = self.find_role_in_guild(winner_role_name, ctx.guild)
if role:
# Remove role from all users
for member in ctx.guild.members:
if role in member.roles:
await member.remove_roles(role, reason='No longer the cookie hunt winner.')
# Give the role to the winner
if not self.member_contains_role(role.name, ctx.author):
await ctx.author.add_roles(role, reason=f'First to grab {cookie_goal} cookies.')
# reset cookie counts
DataAccess.reset_all_cookies()
else:
# Figure out proper grammar
if cookie_count == 1:
cookie_grammar_word = 'cookie'
else:
cookie_grammar_word = 'cookies'
# Send a message saying they got the cookie
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'They now have {cookie_count} {cookie_grammar_word}.')
else:
target_user = self.bot.get_user(int(target_discord_id))
if target_user:
target_user_name = target_user.name
else:
target_user_name = f'Unknown ({target_discord_id})'
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'The leader, {target_user_name}, now has {cookie_count} {cookie_grammar_word}.')
@commands.command()
async def sugar(self, ctx: commands.Context, options: str = None):
"""The origin point for the `sugar` command. Shows relevant cookie count scores based on the options provided.
:param ctx: The command context.
:param options: The (optional) parameters for the sugar command, as enumerated by the
`CookieHuntSugarOptions` enumeration.
"""
if options is not None:
if options.lower() == CookieHuntSugarOptions.HIGH.value:
# Get the high scores
top_collectors = DataAccess.get_top_cookie_collectors(3)
# convert IDs to nicknames and display them
collectors_displayed = False
embed = None
for Discord_Id, Cookie_Count in top_collectors:
if not collectors_displayed:
# Only build the embed the first time through the loop
embed = Embed(title='Top Cookie Collectors',
color=ConfiguredCog.convert_color('#8a4b38'))
collectors_displayed = True
discord_user = self.bot.get_user(int(Discord_Id))
if discord_user:
user_name = discord_user.name
else:
user_name = f'Unknown ({Discord_Id})'
user_name = f'{user_name}:'
# Add field
embed.add_field(name=user_name, value=Cookie_Count, inline=False)
if collectors_displayed:
# We found collectors to display
await ctx.send(embed=embed)
else:
# Our query returned no results
await ctx.send('_No one has gotten any cookies yet!_')
else:
# Unknown option error
await ctx.send(f'Unknown command `{options}`, please re-enter your command and try again.')
else:
# Find cookie count for the user
cookie_count = DataAccess.get_cookie_count_by_discord_id(ctx.author.id)
# Figure out proper grammar
if cookie_count == 1:
cookie_word = 'cookie'
else:
cookie_word = 'cookies'
# Give the requesting user's score
await ctx.send(f'{ctx.author.name} has {cookie_count} {cookie_word}.')
@commands.command('forcedrop')
@commands.has_any_role(*ConfiguredCog.config['mod_roles'])
async def force_drop(self, ctx: commands.Context):
"""Forces a cookie to drop ahead of schedule.
:param ctx: The command context.
"""
await self._check_to_send_cookie(True)
@tasks.loop(minutes=1)
async def _check_to_send_cookie(self, force_drop: bool = False):
"""A looping task to check if a cookie needs to be sent. Checks a few parameters such as a randomized time
delay and whether there's already an available cookie to claim. If all the parameters have been met,
picks a random channel from a configured list and drops a cookie into that channel for claiming.
:param force_drop: Overrides any delays and force a cookie to drop immediately.
"""
# If random number isn't set, plan out a new cookie drop
if self.cookie_drop_delay_hours is None:
self._prep_cookie_drop()
# If current timestamp is after the logged timestamp + random number's hours, then drop a cookie in a
# random channel from the list of channels (assuming we can find the channels by name)
time_delta = datetime.now() - self.cookie_prepared_timestamp
if (force_drop or time_delta > timedelta(hours=self.cookie_drop_delay_hours,
minutes=self.cookie_drop_delay_minutes)) \
and not self.cookie_available:
self.logger.debug('Dropping a cookie.')
# Build the cookie drop message
prefix = ConfiguredCog.config['command_prefix']
color = ConfiguredCog.convert_color('#8a4b38')
cookie_drop_embed = Embed(color=color, title=':cookie:', description=f'Here, have a cookie! Use '
f'`{prefix}gimme` to take it!')
# Pick a random channel to send it to
channel = self._pick_random_channel_to_send()
if channel is not None:
self.cookie_available = True
await channel.send(embed=cookie_drop_embed)
else:
self.logger.error('No valid channels were found. Skipping drop.')
def cog_load(self) -> None:
"""Overridden from commands.Cog; starts the automated task."""
self._check_to_send_cookie.start()
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._check_to_send_cookie.cancel()
def _prep_cookie_drop(self):
"""Sets up the class's instance variables for a new cookie drop in the future."""
min_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][0]
max_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][1]
hour_delay = randint(min_hour, max_hour)
minute_delay = randint(0, 59) # Picks a random minute within the hour to drop it
cookie_type = choices(self.cookie_data, self._get_cookie_weights())[0]
self.logger.debug(f'Preparing a cookie drop for about {hour_delay} hours and {minute_delay} minutes from now.'
f'It is a {cookie_type["name"]} cookie.')
self.cookie_available = False
self.cookie_prepared_timestamp = datetime.now()
self.cookie_drop_delay_hours = hour_delay
self.cookie_drop_delay_minutes = minute_delay
self.cookie_type = cookie_type
@staticmethod
def _parse_cookie_data() -> dict:
"""Parses the cookie file out into its corresponding data
:return: The parsed json data from the necessary data file
"""
with open('Data/cookies.json') as cookie_data_file:
cookie_data_dict = json.load(cookie_data_file)
# Cast the necessary data
for cookie_type in cookie_data_dict:
cookie_type['weight'] = float(cookie_type['weight'])
cookie_type['target'] = CookieHuntTarget(cookie_type['target'])
return cookie_data_dict
def _get_cookie_weights(self) -> list:
"""Gets an arbitrarily ordered list of weights mapped to the cookie data dictionary.
:return: A list of weights.
"""
cookie_weights = []
for cookie_type in self.cookie_data:
cookie_weights.append(cookie_type['weight'])
return cookie_weights
def _pick_random_channel_to_send(self) -> Optional[TextChannel]:
"""Takes the preconfigured list of available channels that we can drop a cookie into, and returns a possible
one.
:return: The randomly selected channel to send a cookie to, or None if no valid options were found.
"""
# Shuffle the whole list of all the channels we can access, so that in case we can't find the first channel
# that we randomly picked, we move on to the next one safely.
random_channel_pick_list = sample(ConfiguredCog.config['content']['cookie_hunt_allowed_channels'],
len(ConfiguredCog.config['content']['cookie_hunt_allowed_channels']))
for selected_channel_name in random_channel_pick_list:
for channel in self.bot.get_all_channels():
if channel.name == selected_channel_name and isinstance(channel, TextChannel):
# Found a channel that matches the name in the config, therefore this is the random channel selected
return channel
# No valid channel options, return None
return None
class DiceRollerCog(ConfiguredCog):
"""A class supporting discord dice rolling features"""
@commands.command()
async def roll(self, ctx: commands.context, dice: str):
"""The origin point for the dice roll command.
:param ctx: The command context.
:param dice: The dice roll command to parse.
"""
if dice:
lexer = DiceLexer()
parser = DiceParser()
try:
step_data, result = parser.parse(lexer.tokenize(dice))
except TypeError:
await ctx.send("There was an error with your roll syntax. Please try again.")
return
if result.is_integer():
result = int(result)
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['dice_result_embed_color'])
title = f'Roll for {ctx.author.name}'
description = f'**Result:**\n' \
f'```\n' \
f'{result}\n' \
f'```\n' \
f'**Steps:**\n' \
f'```\n'
for step in step_data:
description += step + '\n'
description += '```'
embed = Embed(color=color, title=title, description=description)
await ctx.send(embed=embed)
@commands.command()
async def r(self, ctx: commands.context, text: str):
"""An alias for the `roll` method.
:param ctx: The command context.
:param text: The dice roll command to parse.
"""
return await self.roll(ctx, text)
class AutoDrawingPrompt(ConfiguredCog):
"""A class supporting the Drawing Prompt automatic posting functionality"""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
self.current_prompt = ''
@commands.Cog.listener()
async def on_ready(self):
"""Cog Listener to automatically run the task on start."""
await self._get_sketch_prompt()
async def cog_load(self):
|
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._get_sketch_prompt.cancel()
@staticmethod
def _get_neat_date(date: datetime) -> str:
"""Takes a datetime object and converts the day and month into a cleanly formatted string.
:param date: The datetime object to convert to a neat string
:return: The formatted month and day in the format `[Month] [Numeric Day][st|nd|rd|th]`
"""
month_selector = ["January", "February", "March", "April", "May", "June", "July", "August", "September",
"October", "November", "December"]
month_string = month_selector[date.month - 1]
day = date.day
if day == 1 or day == 21 or day == 31:
suffix = "st"
elif day == 2 or day == 22:
suffix = "nd"
elif day == 3 or day == 23:
suffix = "rd"
else:
suffix = "th"
neat_date = f"{month_string} {day}{suffix}"
return neat_date
def _get_daily_drawing_prompt(self) -> str:
"""Gets today's drawing prompt from reddit.com/r/SketchDaily, if it exists.
:return: The daily drawing prompt if there is one found for today; or an empty string if none for today was
found.
"""
site = urllib.request.urlopen(
urllib.request.Request("https://reddit.com/r/SketchDaily/new", headers={'User-Agent': 'Mozilla/5.0'}))
site_str = site.read().decode('utf-8')
# search for today's theme on the skd site
now = datetime.now()
neat_today_date = self._get_neat_date(now)
loc = site_str.find(neat_today_date + " - ")
# if we can't find today's theme, return a blank string
if loc == -1:
return ''
# FIND YESTERDAY'S THEME:
# yesterday = datetime.now() - timedelta(days=1)
# neat_today_date = self._get_neat_date(yesterday)
# loc = site_str.find(neat_today_date + " - ")
site_str = site_str[loc:]
site_str = site_str[:site_str.find('"')]
if len(site_str) > 100:
site_str = site_str[:100]
return site_str
@tasks.loop(minutes=30)
async def _get_sketch_prompt(self):
"""A looping task to query the web for today's sketch prompt and announce it in a given discord channel if it
was found. If today's prompt was already announced, or if the prompt for today wasn't found, nothing is
announced in the channel.
"""
drawing_prompt = self._get_daily_drawing_prompt()
if drawing_prompt == '':
# No drawing prompt found for today; don't do anything
return
elif not drawing_prompt == self.current_prompt:
# The prompt we pulled does not match what we found before, so post the new text.
for channel in self.bot.get_all_channels():
if channel.name == ConfiguredCog.config['content']['daily_prompt_channel'] \
and isinstance(channel, TextChannel):
# Build the prompt message
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['prompt_color'])
title = 'Prompt for today, courtesy of r/SketchDaily'
url = 'https://reddit.com/r/SketchDaily'
description = drawing_prompt
message = Embed(color=color, title=title, url=url, description=description)
# Send the message
await channel.send(embed=message)
# Note down that we found today's prompt (so as not to re-send it)
self.current_prompt = drawing_prompt
break
| """Overridden from commands.Cog; starts the automated task."""
self._get_sketch_prompt.start() | identifier_body |
Toys.py | from datetime import datetime, timedelta
import json
import urllib.request
from enum import Enum
from random import randint, sample, choices
from typing import Optional
from discord.ext import commands, tasks
from discord import Embed, TextChannel
from Code.Cogs.Base import ConfiguredCog
from Code.Data import DataAccess
from Code.Base.Parsing import DiceLexer, DiceParser
class CookieHuntSugarOptions(Enum):
"""An enum listing out all the available sugar command options."""
HIGH = 'high'
class CookieHuntTarget(Enum):
"""An enum listing out all the available target options."""
CLAIMER = 'claimer'
LEADER = 'leader'
class CookieHuntCog(ConfiguredCog):
"""A class supporting the "Cookie Hunt" feature, including the `gimme` and `sugar` commands."""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task.
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
# Init instance vars
self.cookie_data = self._parse_cookie_data()
self.cookie_available = False
self.cookie_prepared_timestamp = None
self.cookie_drop_delay_hours = None
self.cookie_drop_delay_minutes = None
self.cookie_type = None
@commands.command()
async def gimme(self, ctx: commands.Context):
"""The origin point for the `gimme` command. Claims a cookie for the calling user if one has been dropped, and
resets the points for all if the goal was reached.
:param ctx: The command context.
"""
if not self.cookie_available:
# No cookie available message
await ctx.send('There is no cookie available right now. Sorry!')
return
# Write down the pertinent information for the drop since it's about to get wiped
cookie_type = self.cookie_type
# Mark that we got the cookie so no one else takes it (and prepare the next one)
self._prep_cookie_drop()
# Find the target's ID
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
target_discord_id = ctx.author.id
elif cookie_type['target'] == CookieHuntTarget.LEADER:
target_discord_id = DataAccess.get_top_cookie_collectors(1)[0].Discord_Id
else:
# Invalid target, just assume it's the claimer
target_discord_id = ctx.author.id
# Award points as needed
db_user_id = DataAccess.find_user_id_by_discord_id(target_discord_id)
cookie_count = DataAccess.modify_cookie_count(db_user_id, cookie_type['modifier'])
# check if goal was reached by the claimer
cookie_goal = ConfiguredCog.config['content']['cookie_hunt_goal']
if cookie_count >= cookie_goal:
# announce winner
await ctx.send(f'Oh my, it looks like {ctx.author.name} is the cookie monster!')
# Award the role
winner_role_name = ConfiguredCog.config['content']['cookie_hunt_winner_role']
role = self.find_role_in_guild(winner_role_name, ctx.guild)
if role:
# Remove role from all users
for member in ctx.guild.members:
if role in member.roles:
await member.remove_roles(role, reason='No longer the cookie hunt winner.')
# Give the role to the winner
if not self.member_contains_role(role.name, ctx.author):
await ctx.author.add_roles(role, reason=f'First to grab {cookie_goal} cookies.')
# reset cookie counts
DataAccess.reset_all_cookies()
else:
# Figure out proper grammar
if cookie_count == 1:
cookie_grammar_word = 'cookie'
else:
cookie_grammar_word = 'cookies'
# Send a message saying they got the cookie
if cookie_type['target'] == CookieHuntTarget.CLAIMER:
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'They now have {cookie_count} {cookie_grammar_word}.')
else:
target_user = self.bot.get_user(int(target_discord_id))
if target_user:
target_user_name = target_user.name
else:
target_user_name = f'Unknown ({target_discord_id})'
await ctx.send(f'{ctx.author.name} got a {cookie_type["name"]} cookie! '
f'The leader, {target_user_name}, now has {cookie_count} {cookie_grammar_word}.')
@commands.command()
async def sugar(self, ctx: commands.Context, options: str = None):
"""The origin point for the `sugar` command. Shows relevant cookie count scores based on the options provided.
:param ctx: The command context.
:param options: The (optional) parameters for the sugar command, as enumerated by the
`CookieHuntSugarOptions` enumeration.
"""
if options is not None:
if options.lower() == CookieHuntSugarOptions.HIGH.value:
# Get the high scores
top_collectors = DataAccess.get_top_cookie_collectors(3)
# convert IDs to nicknames and display them
collectors_displayed = False
embed = None
for Discord_Id, Cookie_Count in top_collectors:
if not collectors_displayed:
# Only build the embed the first time through the loop
embed = Embed(title='Top Cookie Collectors',
color=ConfiguredCog.convert_color('#8a4b38'))
collectors_displayed = True
discord_user = self.bot.get_user(int(Discord_Id))
if discord_user:
user_name = discord_user.name
else:
user_name = f'Unknown ({Discord_Id})'
user_name = f'{user_name}:'
# Add field
embed.add_field(name=user_name, value=Cookie_Count, inline=False)
if collectors_displayed:
# We found collectors to display
await ctx.send(embed=embed)
else:
# Our query returned no results
await ctx.send('_No one has gotten any cookies yet!_')
else:
# Unknown option error
await ctx.send(f'Unknown command `{options}`, please re-enter your command and try again.')
else:
# Find cookie count for the user
cookie_count = DataAccess.get_cookie_count_by_discord_id(ctx.author.id)
# Figure out proper grammar
if cookie_count == 1:
cookie_word = 'cookie'
else:
cookie_word = 'cookies'
# Give the requesting user's score
await ctx.send(f'{ctx.author.name} has {cookie_count} {cookie_word}.')
@commands.command('forcedrop')
@commands.has_any_role(*ConfiguredCog.config['mod_roles'])
async def force_drop(self, ctx: commands.Context):
"""Forces a cookie to drop ahead of schedule.
:param ctx: The command context.
"""
await self._check_to_send_cookie(True)
@tasks.loop(minutes=1)
async def _check_to_send_cookie(self, force_drop: bool = False):
"""A looping task to check if a cookie needs to be sent. Checks a few parameters such as a randomized time
delay and whether there's already an available cookie to claim. If all the parameters have been met,
picks a random channel from a configured list and drops a cookie into that channel for claiming.
:param force_drop: Overrides any delays and force a cookie to drop immediately.
"""
# If random number isn't set, plan out a new cookie drop
if self.cookie_drop_delay_hours is None:
self._prep_cookie_drop()
# If current timestamp is after the logged timestamp + random number's hours, then drop a cookie in a
# random channel from the list of channels (assuming we can find the channels by name)
time_delta = datetime.now() - self.cookie_prepared_timestamp
if (force_drop or time_delta > timedelta(hours=self.cookie_drop_delay_hours,
minutes=self.cookie_drop_delay_minutes)) \
and not self.cookie_available:
self.logger.debug('Dropping a cookie.')
# Build the cookie drop message
prefix = ConfiguredCog.config['command_prefix']
color = ConfiguredCog.convert_color('#8a4b38')
cookie_drop_embed = Embed(color=color, title=':cookie:', description=f'Here, have a cookie! Use '
f'`{prefix}gimme` to take it!')
# Pick a random channel to send it to
channel = self._pick_random_channel_to_send()
if channel is not None:
self.cookie_available = True
await channel.send(embed=cookie_drop_embed)
else:
self.logger.error('No valid channels were found. Skipping drop.')
def cog_load(self) -> None:
"""Overridden from commands.Cog; starts the automated task."""
self._check_to_send_cookie.start()
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._check_to_send_cookie.cancel()
def _prep_cookie_drop(self):
"""Sets up the class's instance variables for a new cookie drop in the future."""
min_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][0]
max_hour = ConfiguredCog.config['content']['cookie_hunt_hour_variance'][1]
hour_delay = randint(min_hour, max_hour)
minute_delay = randint(0, 59) # Picks a random minute within the hour to drop it
cookie_type = choices(self.cookie_data, self._get_cookie_weights())[0]
self.logger.debug(f'Preparing a cookie drop for about {hour_delay} hours and {minute_delay} minutes from now.'
f'It is a {cookie_type["name"]} cookie.')
self.cookie_available = False
self.cookie_prepared_timestamp = datetime.now()
self.cookie_drop_delay_hours = hour_delay
self.cookie_drop_delay_minutes = minute_delay
self.cookie_type = cookie_type
@staticmethod
def _parse_cookie_data() -> dict:
"""Parses the cookie file out into its corresponding data
:return: The parsed json data from the necessary data file
"""
with open('Data/cookies.json') as cookie_data_file:
cookie_data_dict = json.load(cookie_data_file)
# Cast the necessary data
for cookie_type in cookie_data_dict:
cookie_type['weight'] = float(cookie_type['weight'])
cookie_type['target'] = CookieHuntTarget(cookie_type['target'])
return cookie_data_dict
def | (self) -> list:
"""Gets an arbitrarily ordered list of weights mapped to the cookie data dictionary.
:return: A list of weights.
"""
cookie_weights = []
for cookie_type in self.cookie_data:
cookie_weights.append(cookie_type['weight'])
return cookie_weights
def _pick_random_channel_to_send(self) -> Optional[TextChannel]:
"""Takes the preconfigured list of available channels that we can drop a cookie into, and returns a possible
one.
:return: The randomly selected channel to send a cookie to, or None if no valid options were found.
"""
# Shuffle the whole list of all the channels we can access, so that in case we can't find the first channel
# that we randomly picked, we move on to the next one safely.
random_channel_pick_list = sample(ConfiguredCog.config['content']['cookie_hunt_allowed_channels'],
len(ConfiguredCog.config['content']['cookie_hunt_allowed_channels']))
for selected_channel_name in random_channel_pick_list:
for channel in self.bot.get_all_channels():
if channel.name == selected_channel_name and isinstance(channel, TextChannel):
# Found a channel that matches the name in the config, therefore this is the random channel selected
return channel
# No valid channel options, return None
return None
class DiceRollerCog(ConfiguredCog):
"""A class supporting discord dice rolling features"""
@commands.command()
async def roll(self, ctx: commands.context, dice: str):
"""The origin point for the dice roll command.
:param ctx: The command context.
:param dice: The dice roll command to parse.
"""
if dice:
lexer = DiceLexer()
parser = DiceParser()
try:
step_data, result = parser.parse(lexer.tokenize(dice))
except TypeError:
await ctx.send("There was an error with your roll syntax. Please try again.")
return
if result.is_integer():
result = int(result)
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['dice_result_embed_color'])
title = f'Roll for {ctx.author.name}'
description = f'**Result:**\n' \
f'```\n' \
f'{result}\n' \
f'```\n' \
f'**Steps:**\n' \
f'```\n'
for step in step_data:
description += step + '\n'
description += '```'
embed = Embed(color=color, title=title, description=description)
await ctx.send(embed=embed)
@commands.command()
async def r(self, ctx: commands.context, text: str):
"""An alias for the `roll` method.
:param ctx: The command context.
:param text: The dice roll command to parse.
"""
return await self.roll(ctx, text)
class AutoDrawingPrompt(ConfiguredCog):
"""A class supporting the Drawing Prompt automatic posting functionality"""
def __init__(self, bot: commands.Bot):
"""Initializes the cog and starts the automated task
:param bot: A discord bot instance which will be saved within the class instance.
"""
super().__init__(bot)
self.current_prompt = ''
@commands.Cog.listener()
async def on_ready(self):
"""Cog Listener to automatically run the task on start."""
await self._get_sketch_prompt()
async def cog_load(self):
"""Overridden from commands.Cog; starts the automated task."""
self._get_sketch_prompt.start()
def cog_unload(self):
"""Overridden from commands.Cog; stops the automated task."""
self._get_sketch_prompt.cancel()
@staticmethod
def _get_neat_date(date: datetime) -> str:
"""Takes a datetime object and converts the day and month into a cleanly formatted string.
:param date: The datetime object to convert to a neat string
:return: The formatted month and day in the format `[Month] [Numeric Day][st|nd|rd|th]`
"""
month_selector = ["January", "February", "March", "April", "May", "June", "July", "August", "September",
"October", "November", "December"]
month_string = month_selector[date.month - 1]
day = date.day
if day == 1 or day == 21 or day == 31:
suffix = "st"
elif day == 2 or day == 22:
suffix = "nd"
elif day == 3 or day == 23:
suffix = "rd"
else:
suffix = "th"
neat_date = f"{month_string} {day}{suffix}"
return neat_date
def _get_daily_drawing_prompt(self) -> str:
"""Gets today's drawing prompt from reddit.com/r/SketchDaily, if it exists.
:return: The daily drawing prompt if there is one found for today; or an empty string if none for today was
found.
"""
site = urllib.request.urlopen(
urllib.request.Request("https://reddit.com/r/SketchDaily/new", headers={'User-Agent': 'Mozilla/5.0'}))
site_str = site.read().decode('utf-8')
# search for today's theme on the skd site
now = datetime.now()
neat_today_date = self._get_neat_date(now)
loc = site_str.find(neat_today_date + " - ")
# if we can't find today's theme, return a blank string
if loc == -1:
return ''
# FIND YESTERDAY'S THEME:
# yesterday = datetime.now() - timedelta(days=1)
# neat_today_date = self._get_neat_date(yesterday)
# loc = site_str.find(neat_today_date + " - ")
site_str = site_str[loc:]
site_str = site_str[:site_str.find('"')]
if len(site_str) > 100:
site_str = site_str[:100]
return site_str
@tasks.loop(minutes=30)
async def _get_sketch_prompt(self):
"""A looping task to query the web for today's sketch prompt and announce it in a given discord channel if it
was found. If today's prompt was already announced, or if the prompt for today wasn't found, nothing is
announced in the channel.
"""
drawing_prompt = self._get_daily_drawing_prompt()
if drawing_prompt == '':
# No drawing prompt found for today; don't do anything
return
elif not drawing_prompt == self.current_prompt:
# The prompt we pulled does not match what we found before, so post the new text.
for channel in self.bot.get_all_channels():
if channel.name == ConfiguredCog.config['content']['daily_prompt_channel'] \
and isinstance(channel, TextChannel):
# Build the prompt message
color = ConfiguredCog.convert_color(ConfiguredCog.config['content']['prompt_color'])
title = 'Prompt for today, courtesy of r/SketchDaily'
url = 'https://reddit.com/r/SketchDaily'
description = drawing_prompt
message = Embed(color=color, title=title, url=url, description=description)
# Send the message
await channel.send(embed=message)
# Note down that we found today's prompt (so as not to re-send it)
self.current_prompt = drawing_prompt
break
| _get_cookie_weights | identifier_name |
rtic-i2s-audio-in-out.rs | //! # I2S example with rtic
//!
//! This application show how to use I2sDriver with interruption. Be careful to you ear, wrong
//! operation can trigger loud noise on the DAC output.
//!
//! # Hardware required
//!
//! * a STM32F411 based board
//! * I2S ADC and DAC, eg PCM1808 and PCM5102 from TI
//! * Audio signal at ADC input, and something to ear at DAC output.
//!
//! # Hardware Wiring
//!
//! The wiring assume using PCM1808 and PCM5102 module that can be found on Aliexpress, ebay,
//! Amazon...
//!
//! ## Stm32
//!
//! | stm32 | PCM1808 | PCM5102 |
//! |-------------|---------|---------|
//! | pb12 + pa4 | LRC | LCK |
//! | pb13 + pc10 | BCK | BCK |
//! | pc6 | SCK | SCK |
//! | pc12 | | DIN |
//! | pb15 | OUT | |
//!
//! ## PCM1808 ADC module
//!
//! | Pin | Connected To |
//! |-----|----------------|
//! | LIN | audio in left |
//! | - | audio in gnd |
//! | RIN | audio in right |
//! | FMT | Gnd or NC |
//! | MD1 | Gnd or NC |
//! | MD0 | Gnd or NC |
//! | Gnd | Gnd |
//! | 3.3 | +3V3 |
//! | +5V | +5v |
//! | BCK | pb13 + pc10 |
//! | OUT | pb15 |
//! | LRC | pb12 + pa4 |
//! | SCK | pc6 |
//!
//! ## PCM5102 module
//!
//! | Pin | Connected to |
//! |-------|-----------------|
//! | SCK | pc6 |
//! | BCK | pb13 + pc10 |
//! | DIN | pc12 |
//! | LCK | pb12 + pa4 |
//! | GND | Gnd |
//! | VIN | +3V3 |
//! | FLT | Gnd or +3V3 |
//! | DEMP | Gnd |
//! | XSMT | +3V3 |
//! | A3V3 | |
//! | AGND | audio out gnd |
//! | ROUT | audio out left |
//! | LROUT | audio out right |
//!
//! Notes: on the module (not the chip) A3V3 is connected to VIN and AGND is connected to GND
//!
//!
//! Expected behavior: you should ear a crappy stereo effect. This is actually 2 square tremolo
//! applied with a 90 degrees phase shift.
#![no_std]
#![no_main]
use core::panic::PanicInfo;
use rtt_target::rprintln;
use stm32f4xx_hal as hal;
#[rtic::app(device = stm32f4xx_hal::pac, peripherals = true,dispatchers = [EXTI0, EXTI1, EXTI2])]
mod app {
use core::fmt::Write;
use super::hal;
use hal::gpio::{Edge, NoPin};
use hal::i2s::stm32_i2s_v12x::driver::*;
use hal::i2s::I2s;
use hal::pac::Interrupt;
use hal::pac::{EXTI, SPI2, SPI3};
use hal::prelude::*;
use heapless::spsc::*;
use rtt_target::{rprintln, rtt_init, set_print_channel};
type I2s2Driver = I2sDriver<I2s<SPI2>, Master, Receive, Philips>;
type I2s3Driver = I2sDriver<I2s<SPI3>, Slave, Transmit, Philips>;
// Part of the frame we currently transmit or receive
#[derive(Copy, Clone)]
pub enum FrameState {
LeftMsb,
LeftLsb,
RightMsb,
RightLsb,
}
use FrameState::{LeftLsb, LeftMsb, RightLsb, RightMsb};
impl Default for FrameState {
fn default() -> Self {
Self::LeftMsb
}
}
#[shared]
struct Shared {
#[lock_free]
i2s2_driver: I2s2Driver,
#[lock_free]
i2s3_driver: I2s3Driver,
#[lock_free]
exti: EXTI,
}
#[local]
struct Local {
logs_chan: rtt_target::UpChannel,
adc_p: Producer<'static, (i32, i32), 2>,
process_c: Consumer<'static, (i32, i32), 2>,
process_p: Producer<'static, (i32, i32), 2>,
dac_c: Consumer<'static, (i32, i32), 2>,
}
#[init(local = [queue_1: Queue<(i32,i32), 2> = Queue::new(),queue_2: Queue<(i32,i32), 2> = Queue::new()])]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let queue_1 = cx.local.queue_1;
let queue_2 = cx.local.queue_2;
let channels = rtt_init! {
up: {
0: {
size: 128
name: "Logs"
}
1: {
size: 128
name: "Panics"
}
}
};
let logs_chan = channels.up.0;
let panics_chan = channels.up.1;
set_print_channel(panics_chan);
let (adc_p, process_c) = queue_1.split();
let (process_p, dac_c) = queue_2.split();
let device = cx.device;
let mut syscfg = device.SYSCFG.constrain();
let mut exti = device.EXTI;
let gpioa = device.GPIOA.split();
let gpiob = device.GPIOB.split();
let gpioc = device.GPIOC.split();
let rcc = device.RCC.constrain();
let clocks = rcc
.cfgr
.use_hse(8u32.MHz())
.sysclk(96.MHz())
.hclk(96.MHz())
.pclk1(50.MHz())
.pclk2(100.MHz())
.i2s_clk(61440.kHz())
.freeze();
// I2S pins: (WS, CK, MCLK, SD) for I2S2
let i2s2_pins = (
gpiob.pb12, //WS
gpiob.pb13, //CK
gpioc.pc6, //MCK
gpiob.pb15, //SD
);
let i2s2 = I2s::new(device.SPI2, i2s2_pins, &clocks);
let i2s2_config = I2sDriverConfig::new_master()
.receive()
.standard(Philips)
.data_format(DataFormat::Data24Channel32)
.master_clock(true)
.request_frequency(48_000);
let mut i2s2_driver = I2sDriver::new(i2s2, i2s2_config);
rprintln!("actual sample rate is {}", i2s2_driver.sample_rate());
i2s2_driver.set_rx_interrupt(true);
i2s2_driver.set_error_interrupt(true);
// I2S3 pins: (WS, CK, NoPin, SD) for I2S3
let i2s3_pins = (gpioa.pa4, gpioc.pc10, NoPin::new(), gpioc.pc12);
let i2s3 = I2s::new(device.SPI3, i2s3_pins, &clocks);
let i2s3_config = i2s2_config.to_slave().transmit();
let mut i2s3_driver = I2sDriver::new(i2s3, i2s3_config);
i2s3_driver.set_tx_interrupt(true);
i2s3_driver.set_error_interrupt(true);
// set up an interrupt on WS pin
let ws_pin = i2s3_driver.ws_pin_mut();
ws_pin.make_interrupt_source(&mut syscfg);
ws_pin.trigger_on_edge(&mut exti, Edge::Rising);
// we will enable i2s3 in interrupt
ws_pin.enable_interrupt(&mut exti);
i2s2_driver.enable();
(
Shared {
i2s2_driver,
i2s3_driver,
exti,
},
Local {
logs_chan,
adc_p,
process_c,
process_p,
dac_c,
},
init::Monotonics(),
)
}
#[idle(shared = [], local = [])]
fn idle(_cx: idle::Context) -> ! {
#[allow(clippy::empty_loop)]
loop {}
}
// Printing message directly in a i2s interrupt can cause timing issues.
#[task(capacity = 10, local = [logs_chan])]
fn log(cx: log::Context, message: &'static str) |
// processing audio
#[task(binds = SPI5, local = [count: u32 = 0,process_c,process_p])]
fn process(cx: process::Context) {
let count = cx.local.count;
let process_c = cx.local.process_c;
let process_p = cx.local.process_p;
while let Some(mut smpl) = process_c.dequeue() {
let period = 24000;
if *count > period / 2 {
smpl.0 >>= 1;
}
if *count > period / 4 && *count <= period * 3 / 4 {
smpl.1 >>= 1;
}
*count += 1;
if *count >= period {
*count = 0;
}
process_p.enqueue(smpl).ok();
}
}
#[task(
priority = 4,
binds = SPI2,
local = [frame_state: FrameState = LeftMsb, frame: (u32,u32) = (0,0),adc_p],
shared = [i2s2_driver]
)]
fn i2s2(cx: i2s2::Context) {
let frame_state = cx.local.frame_state;
let frame = cx.local.frame;
let adc_p = cx.local.adc_p;
let i2s2_driver = cx.shared.i2s2_driver;
let status = i2s2_driver.status();
// It's better to read first to avoid triggering ovr flag
if status.rxne() {
let data = i2s2_driver.read_data_register();
match (*frame_state, status.chside()) {
(LeftMsb, Channel::Left) => {
frame.0 = (data as u32) << 16;
*frame_state = LeftLsb;
}
(LeftLsb, Channel::Left) => {
frame.0 |= data as u32;
*frame_state = RightMsb;
}
(RightMsb, Channel::Right) => {
frame.1 = (data as u32) << 16;
*frame_state = RightLsb;
}
(RightLsb, Channel::Right) => {
frame.1 |= data as u32;
// defer sample processing to another task
let (l, r) = *frame;
adc_p.enqueue((l as i32, r as i32)).ok();
rtic::pend(Interrupt::SPI5);
*frame_state = LeftMsb;
}
// in case of ovr this resynchronize at start of new frame
_ => *frame_state = LeftMsb,
}
}
if status.ovr() {
log::spawn("i2s2 Overrun").ok();
// sequence to delete ovr flag
i2s2_driver.read_data_register();
i2s2_driver.status();
}
}
#[task(
priority = 4,
binds = SPI3,
local = [frame_state: FrameState = LeftMsb,frame: (u32,u32) = (0,0),dac_c],
shared = [i2s3_driver,exti]
)]
fn i2s3(cx: i2s3::Context) {
let frame_state = cx.local.frame_state;
let frame = cx.local.frame;
let dac_c = cx.local.dac_c;
let i2s3_driver = cx.shared.i2s3_driver;
let exti = cx.shared.exti;
let status = i2s3_driver.status();
// it's better to write data first to avoid to trigger udr flag
if status.txe() {
let data;
match (*frame_state, status.chside()) {
(LeftMsb, Channel::Left) => {
let (l, r) = dac_c.dequeue().unwrap_or_default();
*frame = (l as u32, r as u32);
data = (frame.0 >> 16) as u16;
*frame_state = LeftLsb;
}
(LeftLsb, Channel::Left) => {
data = (frame.0 & 0xFFFF) as u16;
*frame_state = RightMsb;
}
(RightMsb, Channel::Right) => {
data = (frame.1 >> 16) as u16;
*frame_state = RightLsb;
}
(RightLsb, Channel::Right) => {
data = (frame.1 & 0xFFFF) as u16;
*frame_state = LeftMsb;
}
// in case of udr this resynchronize tracked and actual channel
_ => {
*frame_state = LeftMsb;
data = 0; //garbage data to avoid additional underrun
}
}
i2s3_driver.write_data_register(data);
}
if status.fre() {
log::spawn("i2s3 Frame error").ok();
i2s3_driver.disable();
i2s3_driver.ws_pin_mut().enable_interrupt(exti);
}
if status.udr() {
log::spawn("i2s3 udr").ok();
i2s3_driver.status();
i2s3_driver.write_data_register(0);
}
}
// Look i2s3 WS line for (re) synchronisation
#[task(priority = 4, binds = EXTI4, shared = [i2s3_driver,exti])]
fn exti4(cx: exti4::Context) {
let i2s3_driver = cx.shared.i2s3_driver;
let exti = cx.shared.exti;
let ws_pin = i2s3_driver.ws_pin_mut();
// check if that pin triggered the interrupt.
if ws_pin.check_interrupt() {
// Here we know ws pin is high because the interrupt was triggerd by it's rising edge
ws_pin.clear_interrupt_pending_bit();
ws_pin.disable_interrupt(exti);
i2s3_driver.write_data_register(0);
i2s3_driver.enable();
}
}
}
#[inline(never)]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
rprintln!("{}", info);
loop {} // You might need a compiler fence in here.
}
| {
writeln!(cx.local.logs_chan, "{}", message).unwrap();
} | identifier_body |
rtic-i2s-audio-in-out.rs | //! # I2S example with rtic
//!
//! This application show how to use I2sDriver with interruption. Be careful to you ear, wrong
//! operation can trigger loud noise on the DAC output.
//!
//! # Hardware required
//!
//! * a STM32F411 based board
//! * I2S ADC and DAC, eg PCM1808 and PCM5102 from TI
//! * Audio signal at ADC input, and something to ear at DAC output.
//!
//! # Hardware Wiring
//!
//! The wiring assume using PCM1808 and PCM5102 module that can be found on Aliexpress, ebay,
//! Amazon...
//!
//! ## Stm32
//!
//! | stm32 | PCM1808 | PCM5102 |
//! |-------------|---------|---------|
//! | pb12 + pa4 | LRC | LCK |
//! | pb13 + pc10 | BCK | BCK |
//! | pc6 | SCK | SCK |
//! | pc12 | | DIN |
//! | pb15 | OUT | |
//!
//! ## PCM1808 ADC module
//!
//! | Pin | Connected To |
//! |-----|----------------|
//! | LIN | audio in left |
//! | - | audio in gnd |
//! | RIN | audio in right |
//! | FMT | Gnd or NC |
//! | MD1 | Gnd or NC |
//! | MD0 | Gnd or NC |
//! | Gnd | Gnd |
//! | 3.3 | +3V3 |
//! | +5V | +5v |
//! | BCK | pb13 + pc10 |
//! | OUT | pb15 |
//! | LRC | pb12 + pa4 |
//! | SCK | pc6 |
//!
//! ## PCM5102 module
//!
//! | Pin | Connected to |
//! |-------|-----------------|
//! | SCK | pc6 |
//! | BCK | pb13 + pc10 |
//! | DIN | pc12 |
//! | LCK | pb12 + pa4 |
//! | GND | Gnd |
//! | VIN | +3V3 |
//! | FLT | Gnd or +3V3 |
//! | DEMP | Gnd |
//! | XSMT | +3V3 |
//! | A3V3 | |
//! | AGND | audio out gnd |
//! | ROUT | audio out left |
//! | LROUT | audio out right |
//!
//! Notes: on the module (not the chip) A3V3 is connected to VIN and AGND is connected to GND
//!
//!
//! Expected behavior: you should ear a crappy stereo effect. This is actually 2 square tremolo
//! applied with a 90 degrees phase shift.
#![no_std]
#![no_main]
use core::panic::PanicInfo;
use rtt_target::rprintln;
use stm32f4xx_hal as hal;
#[rtic::app(device = stm32f4xx_hal::pac, peripherals = true,dispatchers = [EXTI0, EXTI1, EXTI2])]
mod app {
use core::fmt::Write;
use super::hal;
use hal::gpio::{Edge, NoPin};
use hal::i2s::stm32_i2s_v12x::driver::*;
use hal::i2s::I2s;
use hal::pac::Interrupt;
use hal::pac::{EXTI, SPI2, SPI3};
use hal::prelude::*;
use heapless::spsc::*;
use rtt_target::{rprintln, rtt_init, set_print_channel};
type I2s2Driver = I2sDriver<I2s<SPI2>, Master, Receive, Philips>;
type I2s3Driver = I2sDriver<I2s<SPI3>, Slave, Transmit, Philips>;
// Part of the frame we currently transmit or receive
#[derive(Copy, Clone)]
pub enum FrameState {
LeftMsb,
LeftLsb,
RightMsb,
RightLsb,
}
use FrameState::{LeftLsb, LeftMsb, RightLsb, RightMsb};
impl Default for FrameState {
fn default() -> Self {
Self::LeftMsb
}
}
#[shared]
struct Shared {
#[lock_free]
i2s2_driver: I2s2Driver,
#[lock_free]
i2s3_driver: I2s3Driver,
#[lock_free]
exti: EXTI,
}
#[local]
struct Local {
logs_chan: rtt_target::UpChannel,
adc_p: Producer<'static, (i32, i32), 2>,
process_c: Consumer<'static, (i32, i32), 2>,
process_p: Producer<'static, (i32, i32), 2>,
dac_c: Consumer<'static, (i32, i32), 2>,
}
#[init(local = [queue_1: Queue<(i32,i32), 2> = Queue::new(),queue_2: Queue<(i32,i32), 2> = Queue::new()])]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let queue_1 = cx.local.queue_1;
let queue_2 = cx.local.queue_2;
let channels = rtt_init! {
up: {
0: {
size: 128
name: "Logs"
}
1: {
size: 128
name: "Panics"
}
}
};
let logs_chan = channels.up.0;
let panics_chan = channels.up.1;
set_print_channel(panics_chan);
let (adc_p, process_c) = queue_1.split();
let (process_p, dac_c) = queue_2.split();
let device = cx.device;
let mut syscfg = device.SYSCFG.constrain();
let mut exti = device.EXTI;
let gpioa = device.GPIOA.split();
let gpiob = device.GPIOB.split();
let gpioc = device.GPIOC.split();
let rcc = device.RCC.constrain();
let clocks = rcc
.cfgr
.use_hse(8u32.MHz())
.sysclk(96.MHz())
.hclk(96.MHz())
.pclk1(50.MHz())
.pclk2(100.MHz())
.i2s_clk(61440.kHz())
.freeze();
// I2S pins: (WS, CK, MCLK, SD) for I2S2
let i2s2_pins = (
gpiob.pb12, //WS
gpiob.pb13, //CK
gpioc.pc6, //MCK
gpiob.pb15, //SD
);
let i2s2 = I2s::new(device.SPI2, i2s2_pins, &clocks);
let i2s2_config = I2sDriverConfig::new_master()
.receive()
.standard(Philips)
.data_format(DataFormat::Data24Channel32)
.master_clock(true)
.request_frequency(48_000);
let mut i2s2_driver = I2sDriver::new(i2s2, i2s2_config);
rprintln!("actual sample rate is {}", i2s2_driver.sample_rate());
i2s2_driver.set_rx_interrupt(true);
i2s2_driver.set_error_interrupt(true);
// I2S3 pins: (WS, CK, NoPin, SD) for I2S3
let i2s3_pins = (gpioa.pa4, gpioc.pc10, NoPin::new(), gpioc.pc12);
let i2s3 = I2s::new(device.SPI3, i2s3_pins, &clocks);
let i2s3_config = i2s2_config.to_slave().transmit();
let mut i2s3_driver = I2sDriver::new(i2s3, i2s3_config);
i2s3_driver.set_tx_interrupt(true);
i2s3_driver.set_error_interrupt(true);
// set up an interrupt on WS pin
let ws_pin = i2s3_driver.ws_pin_mut();
ws_pin.make_interrupt_source(&mut syscfg);
ws_pin.trigger_on_edge(&mut exti, Edge::Rising);
// we will enable i2s3 in interrupt
ws_pin.enable_interrupt(&mut exti);
i2s2_driver.enable();
(
Shared {
i2s2_driver,
i2s3_driver,
exti,
},
Local {
logs_chan,
adc_p,
process_c,
process_p,
dac_c,
},
init::Monotonics(),
)
}
#[idle(shared = [], local = [])]
fn idle(_cx: idle::Context) -> ! {
#[allow(clippy::empty_loop)]
loop {}
}
// Printing message directly in a i2s interrupt can cause timing issues.
#[task(capacity = 10, local = [logs_chan])]
fn log(cx: log::Context, message: &'static str) {
writeln!(cx.local.logs_chan, "{}", message).unwrap();
}
// processing audio
#[task(binds = SPI5, local = [count: u32 = 0,process_c,process_p])]
fn process(cx: process::Context) {
let count = cx.local.count;
let process_c = cx.local.process_c;
let process_p = cx.local.process_p;
while let Some(mut smpl) = process_c.dequeue() {
let period = 24000;
if *count > period / 2 {
smpl.0 >>= 1;
}
if *count > period / 4 && *count <= period * 3 / 4 {
smpl.1 >>= 1;
}
*count += 1;
if *count >= period {
*count = 0;
}
process_p.enqueue(smpl).ok();
}
}
#[task(
priority = 4,
binds = SPI2,
local = [frame_state: FrameState = LeftMsb, frame: (u32,u32) = (0,0),adc_p],
shared = [i2s2_driver]
)]
fn | (cx: i2s2::Context) {
let frame_state = cx.local.frame_state;
let frame = cx.local.frame;
let adc_p = cx.local.adc_p;
let i2s2_driver = cx.shared.i2s2_driver;
let status = i2s2_driver.status();
// It's better to read first to avoid triggering ovr flag
if status.rxne() {
let data = i2s2_driver.read_data_register();
match (*frame_state, status.chside()) {
(LeftMsb, Channel::Left) => {
frame.0 = (data as u32) << 16;
*frame_state = LeftLsb;
}
(LeftLsb, Channel::Left) => {
frame.0 |= data as u32;
*frame_state = RightMsb;
}
(RightMsb, Channel::Right) => {
frame.1 = (data as u32) << 16;
*frame_state = RightLsb;
}
(RightLsb, Channel::Right) => {
frame.1 |= data as u32;
// defer sample processing to another task
let (l, r) = *frame;
adc_p.enqueue((l as i32, r as i32)).ok();
rtic::pend(Interrupt::SPI5);
*frame_state = LeftMsb;
}
// in case of ovr this resynchronize at start of new frame
_ => *frame_state = LeftMsb,
}
}
if status.ovr() {
log::spawn("i2s2 Overrun").ok();
// sequence to delete ovr flag
i2s2_driver.read_data_register();
i2s2_driver.status();
}
}
#[task(
priority = 4,
binds = SPI3,
local = [frame_state: FrameState = LeftMsb,frame: (u32,u32) = (0,0),dac_c],
shared = [i2s3_driver,exti]
)]
fn i2s3(cx: i2s3::Context) {
let frame_state = cx.local.frame_state;
let frame = cx.local.frame;
let dac_c = cx.local.dac_c;
let i2s3_driver = cx.shared.i2s3_driver;
let exti = cx.shared.exti;
let status = i2s3_driver.status();
// it's better to write data first to avoid to trigger udr flag
if status.txe() {
let data;
match (*frame_state, status.chside()) {
(LeftMsb, Channel::Left) => {
let (l, r) = dac_c.dequeue().unwrap_or_default();
*frame = (l as u32, r as u32);
data = (frame.0 >> 16) as u16;
*frame_state = LeftLsb;
}
(LeftLsb, Channel::Left) => {
data = (frame.0 & 0xFFFF) as u16;
*frame_state = RightMsb;
}
(RightMsb, Channel::Right) => {
data = (frame.1 >> 16) as u16;
*frame_state = RightLsb;
}
(RightLsb, Channel::Right) => {
data = (frame.1 & 0xFFFF) as u16;
*frame_state = LeftMsb;
}
// in case of udr this resynchronize tracked and actual channel
_ => {
*frame_state = LeftMsb;
data = 0; //garbage data to avoid additional underrun
}
}
i2s3_driver.write_data_register(data);
}
if status.fre() {
log::spawn("i2s3 Frame error").ok();
i2s3_driver.disable();
i2s3_driver.ws_pin_mut().enable_interrupt(exti);
}
if status.udr() {
log::spawn("i2s3 udr").ok();
i2s3_driver.status();
i2s3_driver.write_data_register(0);
}
}
// Look i2s3 WS line for (re) synchronisation
#[task(priority = 4, binds = EXTI4, shared = [i2s3_driver,exti])]
fn exti4(cx: exti4::Context) {
let i2s3_driver = cx.shared.i2s3_driver;
let exti = cx.shared.exti;
let ws_pin = i2s3_driver.ws_pin_mut();
// check if that pin triggered the interrupt.
if ws_pin.check_interrupt() {
// Here we know ws pin is high because the interrupt was triggerd by it's rising edge
ws_pin.clear_interrupt_pending_bit();
ws_pin.disable_interrupt(exti);
i2s3_driver.write_data_register(0);
i2s3_driver.enable();
}
}
}
#[inline(never)]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
rprintln!("{}", info);
loop {} // You might need a compiler fence in here.
}
| i2s2 | identifier_name |
rtic-i2s-audio-in-out.rs | //! # I2S example with rtic
//!
//! This application show how to use I2sDriver with interruption. Be careful to you ear, wrong
//! operation can trigger loud noise on the DAC output.
//!
//! # Hardware required
//!
//! * a STM32F411 based board
//! * I2S ADC and DAC, eg PCM1808 and PCM5102 from TI
//! * Audio signal at ADC input, and something to ear at DAC output.
//!
//! # Hardware Wiring
//!
//! The wiring assume using PCM1808 and PCM5102 module that can be found on Aliexpress, ebay,
//! Amazon...
//!
//! ## Stm32
//!
//! | stm32 | PCM1808 | PCM5102 |
//! |-------------|---------|---------|
//! | pb12 + pa4 | LRC | LCK |
//! | pb13 + pc10 | BCK | BCK |
//! | pc6 | SCK | SCK |
//! | pc12 | | DIN |
//! | pb15 | OUT | |
//!
//! ## PCM1808 ADC module
//!
//! | Pin | Connected To |
//! |-----|----------------|
//! | LIN | audio in left |
//! | - | audio in gnd |
//! | RIN | audio in right |
//! | FMT | Gnd or NC |
//! | MD1 | Gnd or NC |
//! | MD0 | Gnd or NC |
//! | Gnd | Gnd |
//! | 3.3 | +3V3 |
//! | +5V | +5v |
//! | BCK | pb13 + pc10 |
//! | OUT | pb15 |
//! | LRC | pb12 + pa4 |
//! | SCK | pc6 |
//!
//! ## PCM5102 module
//!
//! | Pin | Connected to |
//! |-------|-----------------|
//! | SCK | pc6 |
//! | BCK | pb13 + pc10 |
//! | DIN | pc12 |
//! | LCK | pb12 + pa4 |
//! | GND | Gnd |
//! | VIN | +3V3 |
//! | FLT | Gnd or +3V3 |
//! | DEMP | Gnd |
//! | XSMT | +3V3 |
//! | A3V3 | |
//! | AGND | audio out gnd |
//! | ROUT | audio out left |
//! | LROUT | audio out right |
//!
//! Notes: on the module (not the chip) A3V3 is connected to VIN and AGND is connected to GND
//!
//!
//! Expected behavior: you should ear a crappy stereo effect. This is actually 2 square tremolo
//! applied with a 90 degrees phase shift.
#![no_std]
#![no_main]
use core::panic::PanicInfo;
use rtt_target::rprintln;
use stm32f4xx_hal as hal;
#[rtic::app(device = stm32f4xx_hal::pac, peripherals = true,dispatchers = [EXTI0, EXTI1, EXTI2])]
mod app {
use core::fmt::Write;
use super::hal;
use hal::gpio::{Edge, NoPin};
use hal::i2s::stm32_i2s_v12x::driver::*;
use hal::i2s::I2s;
use hal::pac::Interrupt;
use hal::pac::{EXTI, SPI2, SPI3};
use hal::prelude::*;
use heapless::spsc::*;
use rtt_target::{rprintln, rtt_init, set_print_channel};
type I2s2Driver = I2sDriver<I2s<SPI2>, Master, Receive, Philips>;
type I2s3Driver = I2sDriver<I2s<SPI3>, Slave, Transmit, Philips>;
// Part of the frame we currently transmit or receive
#[derive(Copy, Clone)]
pub enum FrameState {
LeftMsb,
LeftLsb,
RightMsb,
RightLsb,
}
use FrameState::{LeftLsb, LeftMsb, RightLsb, RightMsb};
impl Default for FrameState {
fn default() -> Self {
Self::LeftMsb
}
}
#[shared]
struct Shared {
#[lock_free]
i2s2_driver: I2s2Driver,
#[lock_free]
i2s3_driver: I2s3Driver,
#[lock_free]
exti: EXTI,
}
#[local]
struct Local {
logs_chan: rtt_target::UpChannel,
adc_p: Producer<'static, (i32, i32), 2>,
process_c: Consumer<'static, (i32, i32), 2>,
process_p: Producer<'static, (i32, i32), 2>,
dac_c: Consumer<'static, (i32, i32), 2>,
}
#[init(local = [queue_1: Queue<(i32,i32), 2> = Queue::new(),queue_2: Queue<(i32,i32), 2> = Queue::new()])]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let queue_1 = cx.local.queue_1;
let queue_2 = cx.local.queue_2;
let channels = rtt_init! {
up: {
0: {
size: 128
name: "Logs"
}
1: {
size: 128
name: "Panics"
}
}
};
let logs_chan = channels.up.0;
let panics_chan = channels.up.1;
set_print_channel(panics_chan);
let (adc_p, process_c) = queue_1.split();
let (process_p, dac_c) = queue_2.split();
let device = cx.device;
let mut syscfg = device.SYSCFG.constrain();
let mut exti = device.EXTI; | let gpiob = device.GPIOB.split();
let gpioc = device.GPIOC.split();
let rcc = device.RCC.constrain();
let clocks = rcc
.cfgr
.use_hse(8u32.MHz())
.sysclk(96.MHz())
.hclk(96.MHz())
.pclk1(50.MHz())
.pclk2(100.MHz())
.i2s_clk(61440.kHz())
.freeze();
// I2S pins: (WS, CK, MCLK, SD) for I2S2
let i2s2_pins = (
gpiob.pb12, //WS
gpiob.pb13, //CK
gpioc.pc6, //MCK
gpiob.pb15, //SD
);
let i2s2 = I2s::new(device.SPI2, i2s2_pins, &clocks);
let i2s2_config = I2sDriverConfig::new_master()
.receive()
.standard(Philips)
.data_format(DataFormat::Data24Channel32)
.master_clock(true)
.request_frequency(48_000);
let mut i2s2_driver = I2sDriver::new(i2s2, i2s2_config);
rprintln!("actual sample rate is {}", i2s2_driver.sample_rate());
i2s2_driver.set_rx_interrupt(true);
i2s2_driver.set_error_interrupt(true);
// I2S3 pins: (WS, CK, NoPin, SD) for I2S3
let i2s3_pins = (gpioa.pa4, gpioc.pc10, NoPin::new(), gpioc.pc12);
let i2s3 = I2s::new(device.SPI3, i2s3_pins, &clocks);
let i2s3_config = i2s2_config.to_slave().transmit();
let mut i2s3_driver = I2sDriver::new(i2s3, i2s3_config);
i2s3_driver.set_tx_interrupt(true);
i2s3_driver.set_error_interrupt(true);
// set up an interrupt on WS pin
let ws_pin = i2s3_driver.ws_pin_mut();
ws_pin.make_interrupt_source(&mut syscfg);
ws_pin.trigger_on_edge(&mut exti, Edge::Rising);
// we will enable i2s3 in interrupt
ws_pin.enable_interrupt(&mut exti);
i2s2_driver.enable();
(
Shared {
i2s2_driver,
i2s3_driver,
exti,
},
Local {
logs_chan,
adc_p,
process_c,
process_p,
dac_c,
},
init::Monotonics(),
)
}
#[idle(shared = [], local = [])]
fn idle(_cx: idle::Context) -> ! {
#[allow(clippy::empty_loop)]
loop {}
}
// Printing message directly in a i2s interrupt can cause timing issues.
#[task(capacity = 10, local = [logs_chan])]
fn log(cx: log::Context, message: &'static str) {
writeln!(cx.local.logs_chan, "{}", message).unwrap();
}
// processing audio
#[task(binds = SPI5, local = [count: u32 = 0,process_c,process_p])]
fn process(cx: process::Context) {
let count = cx.local.count;
let process_c = cx.local.process_c;
let process_p = cx.local.process_p;
while let Some(mut smpl) = process_c.dequeue() {
let period = 24000;
if *count > period / 2 {
smpl.0 >>= 1;
}
if *count > period / 4 && *count <= period * 3 / 4 {
smpl.1 >>= 1;
}
*count += 1;
if *count >= period {
*count = 0;
}
process_p.enqueue(smpl).ok();
}
}
#[task(
priority = 4,
binds = SPI2,
local = [frame_state: FrameState = LeftMsb, frame: (u32,u32) = (0,0),adc_p],
shared = [i2s2_driver]
)]
fn i2s2(cx: i2s2::Context) {
let frame_state = cx.local.frame_state;
let frame = cx.local.frame;
let adc_p = cx.local.adc_p;
let i2s2_driver = cx.shared.i2s2_driver;
let status = i2s2_driver.status();
// It's better to read first to avoid triggering ovr flag
if status.rxne() {
let data = i2s2_driver.read_data_register();
match (*frame_state, status.chside()) {
(LeftMsb, Channel::Left) => {
frame.0 = (data as u32) << 16;
*frame_state = LeftLsb;
}
(LeftLsb, Channel::Left) => {
frame.0 |= data as u32;
*frame_state = RightMsb;
}
(RightMsb, Channel::Right) => {
frame.1 = (data as u32) << 16;
*frame_state = RightLsb;
}
(RightLsb, Channel::Right) => {
frame.1 |= data as u32;
// defer sample processing to another task
let (l, r) = *frame;
adc_p.enqueue((l as i32, r as i32)).ok();
rtic::pend(Interrupt::SPI5);
*frame_state = LeftMsb;
}
// in case of ovr this resynchronize at start of new frame
_ => *frame_state = LeftMsb,
}
}
if status.ovr() {
log::spawn("i2s2 Overrun").ok();
// sequence to delete ovr flag
i2s2_driver.read_data_register();
i2s2_driver.status();
}
}
#[task(
priority = 4,
binds = SPI3,
local = [frame_state: FrameState = LeftMsb,frame: (u32,u32) = (0,0),dac_c],
shared = [i2s3_driver,exti]
)]
fn i2s3(cx: i2s3::Context) {
let frame_state = cx.local.frame_state;
let frame = cx.local.frame;
let dac_c = cx.local.dac_c;
let i2s3_driver = cx.shared.i2s3_driver;
let exti = cx.shared.exti;
let status = i2s3_driver.status();
// it's better to write data first to avoid to trigger udr flag
if status.txe() {
let data;
match (*frame_state, status.chside()) {
(LeftMsb, Channel::Left) => {
let (l, r) = dac_c.dequeue().unwrap_or_default();
*frame = (l as u32, r as u32);
data = (frame.0 >> 16) as u16;
*frame_state = LeftLsb;
}
(LeftLsb, Channel::Left) => {
data = (frame.0 & 0xFFFF) as u16;
*frame_state = RightMsb;
}
(RightMsb, Channel::Right) => {
data = (frame.1 >> 16) as u16;
*frame_state = RightLsb;
}
(RightLsb, Channel::Right) => {
data = (frame.1 & 0xFFFF) as u16;
*frame_state = LeftMsb;
}
// in case of udr this resynchronize tracked and actual channel
_ => {
*frame_state = LeftMsb;
data = 0; //garbage data to avoid additional underrun
}
}
i2s3_driver.write_data_register(data);
}
if status.fre() {
log::spawn("i2s3 Frame error").ok();
i2s3_driver.disable();
i2s3_driver.ws_pin_mut().enable_interrupt(exti);
}
if status.udr() {
log::spawn("i2s3 udr").ok();
i2s3_driver.status();
i2s3_driver.write_data_register(0);
}
}
// Look i2s3 WS line for (re) synchronisation
#[task(priority = 4, binds = EXTI4, shared = [i2s3_driver,exti])]
fn exti4(cx: exti4::Context) {
let i2s3_driver = cx.shared.i2s3_driver;
let exti = cx.shared.exti;
let ws_pin = i2s3_driver.ws_pin_mut();
// check if that pin triggered the interrupt.
if ws_pin.check_interrupt() {
// Here we know ws pin is high because the interrupt was triggerd by it's rising edge
ws_pin.clear_interrupt_pending_bit();
ws_pin.disable_interrupt(exti);
i2s3_driver.write_data_register(0);
i2s3_driver.enable();
}
}
}
#[inline(never)]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
rprintln!("{}", info);
loop {} // You might need a compiler fence in here.
} | let gpioa = device.GPIOA.split(); | random_line_split |
Renderer.ts | /*
* surface -> canvas なレンダラ。
* HTMLCanvasElement もこの層で抽象化する
*/
import * as Util from "./Util";
import * as SDT from "ikagaka-shell-loader/lib/Model/SurfaceDefinitionTree";
import {Canvas, copy} from "./Canvas";
export class Renderer {
// GCの発生を抑えるためバッファを使いまわす
srfCnv: Canvas;
cnv: HTMLCanvasElement;
ctx: CanvasRenderingContext2D;
tmpcnv: HTMLCanvasElement
tmpctx: CanvasRenderingContext2D;
debug: boolean; // 基準点描画
use_self_alpha: boolean;
// 渡されたCanvasをベースサーフェスとしてレイヤー合成を開始する。
// nullならば1x1のCanvasをベースサーフェスとする。
// 渡されたCanvasは変更しない。
constructor(srfCnv?: Canvas) {
this.srfCnv = srfCnv == null ? new Canvas(Util.createCanvas()) : srfCnv;
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
this.tmpcnv = Util.createCanvas();
this.tmpctx = <CanvasRenderingContext2D>this.tmpcnv.getContext("2d");
this.use_self_alpha = false;
this.debug = false;
}
// バッファを使いまわすためのリセット
// clearは短形を保つがリセットは1x1になる
reset(): void {
// reshapeの機会を減らすため大きさはそのままにする
this.ctx.canvas.width = this.ctx.canvas.width;
this.tmpctx.canvas.width = this.tmpctx.canvas.width;
this.srfCnv.basePosX = 0;
this.srfCnv.basePosY = 0;
this.srfCnv.baseWidth = 0;
this.srfCnv.baseHeight = 0;
}
clear(): void {
this.ctx.clearRect(0, 0, this.ctx.canvas.width, this.ctx.canvas.height);
}
// [
// {canvas: srfCnv1, type: "base", x: 0, y: 0}
// {canvas: srfCnv2, type: "overlay", x: 50, y: 50}
// ]
composeElements(elms: {type: string, x: number, y: number, canvas: Canvas}[]): Canvas {
// baseを決定
const bases = elms.filter(({type})=> type === "base");
const others = elms.filter(({type})=> type !== "base");
// element[MAX].base > element0 > element[MIN]
if(bases.length === 0){
// element[MIN]
// elms.length > 0なのでundefinedにはならない…はず。
// お前がbaseになるんだよ
const base = <SDT.SurfaceElement&{canvas:Canvas}>others.shift();
if(base != null){
bases.push(base);
console.warn("SurfaceRenderer#composeElements: base surface not found. failback.", bases, others);
}else{
console.error("SurfaceRenderer#composeElements: cannot decide base surface.", base, others);
return this.srfCnv;
}
}
let base = bases.slice(-1)[0]; /* last */
this.base(base.canvas);
others.forEach(({canvas, type, x, y})=>{
this.composeElement(canvas, type, x, y);
});
return this.srfCnv;
}
composeElement(canvas: Canvas, type: string, x=0, y=0): void {
switch (type) {
case "overlay": this.overlay(canvas, x, y); break;
case "overlayfast": this.overlayfast(canvas, x, y); break;
case "replace": this.replace(canvas, x, y); break;
case "interpolate": this.interpolate(canvas, x, y); break;
case "reduce": this.reduce(canvas, x, y); break;
default:
console.warn("SurfaceRenderer#composeElement:", "unkown compose method", canvas, type, x, y);
}
}
rebase(srfCnv: Canvas){
this.srfCnv = srfCnv; // 描画対象を変える
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
}
init(srfCnv: Canvas){
// this を srfCnv の値で置き換え
this.base(srfCnv);
this.srfCnv.basePosX = srfCnv.basePosX;
this.srfCnv.basePosY = srfCnv.basePosY;
this.srfCnv.baseWidth = srfCnv.baseWidth;
this.srfCnv.baseHeight = srfCnv.baseHeight;
}
//下位レイヤをコマで完全に置き換える。collisionもコマのサーフェスに定義されたものに更新される。
//このメソッドのパターンを重ねると、サーフェス全面を描画し直すことによるアニメーション(いわばパラパラ漫画)が実現される。
//この描画メソッドが指定されたpattern定義では、XY座標は無視される。
//着せ替え・elementでも使用できる。
base(part: Canvas): void {
this.cnv.width = part.cnv.width;
this.cnv.height = part.cnv.height;
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, 0, 0);
}
//下位レイヤにコマを重ねる。
//着せ替え・elementでも使用できる。
overlay(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの非透過部分(半透明含む)にのみコマを重ねる。
//着せ替え・elementでも使用できる。
overlayfast(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-atop";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの透明なところにのみコマを重ねる。
//下位レイヤの半透明部分に対しても、透明度が高い部分ほど強くコマを合成する。
//interpolateで重なる部分はベースより上位(手前)側になければならない
//(interpolateのコマが描画している部分に、上位のレイヤで不透明な部分が重なると反映されなくなる)。
//着せ替え・elementでも使用できる。
interpolate(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "destination-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤにコマを重ねるが、コマの透過部分について下位レイヤにも反映する(reduce + overlayに近い)。
//着せ替え・elementでも使用できる。
replace(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.clearRect(this.srfCnv.basePosX + x, this.srfCnv.basePosY + y, part.cnv.width, part.cnv.height);
this.overlay(part, x, y);
}
prepareOverlay(part: Canvas, x: number, y: number): void {
// パーツがはみだす量
// もし負なら左へはみ出した量
let left = this.srfCnv.basePosX + x;
// もし負なら右へはみ出した量
let right = this.cnv.width - ((this.srfCnv.basePosX + x) + part.cnv.width);
// もし負なら上へはみ出した量
let top = this.srfCnv.basePosY + y;
// もし負なら↓へはみ出した量
let bottom = this.cnv.height - ((this.srfCnv.basePosY + y) + part.cnv.height);
if(left < 0 || right < 0 || top < 0 || bottom < 0){
// はみ出し発生
let offsetX = 0; // ずれた量
let offsetY = 0;
console.info("SurfaceRenderer#prepareOverlay: reshape occured");
// 現状をtmpcnvへコピー
Util.fastcopy(this.cnv, this.tmpctx);
if(left<0){
offsetX = (-left);
this.cnv.width += (-left); // reshape
this.srfCnv.basePosX += (-left);
}
if(right<0){
this.cnv.width += (-right); // reshape
}
if(top<0){
offsetY = (-top);
this.cnv.height += (-top); // reshape
this.srfCnv.basePosY += (-top);
}
if(bottom<0){
this.cnv.height += (-bottom); // reshape
}
this.ctx.drawImage(this.tmpctx.canvas, offsetX, offsetY); //下位レイヤ再描画
}
if(this.debug){
// 基準点描画
this.ctx.fillStyle = "lime";
this.ctx.fillRect(this.srfCnv.basePosX, this.srfCnv.basePosY, 5, 5);
}
}
//下位レイヤの抜き色による透過領域に、そのコマの抜き色による透過領域を追加する。コマの抜き色で無い部分は無視される。
//着せ替え用に用意されたメソッドだが、着せ替えでないアニメーション・elementでも使用可能。
//http://usada.sakura.vg/contents/seriko.html
reduce(part: Canvas, x: number, y: number): void {
// はみ出しちぇっく prepareOverlay はしない
const width = x + part.cnv.width < this.cnv.width ? part.cnv.width : this.cnv.width - x;
const height = y + part.cnv.height < this.cnv.height ? part.cnv.height : this.cnv.height - y;
const imgdataA = this.ctx.getImageData(0, 0, this.cnv.width, this.cnv.height);
const dataA = imgdataA.data;
// partの透明領域までアクセスする必要がある
const ctxB = <CanvasRenderingContext2D>part.cnv.getContext("2d");
const imgdataB = ctxB.getImageData(0, 0, part.cnv.width, part.cnv.height)
const dataB = imgdataB.data;
for(let _y=0; _y<height; _y++){
for(let _x=0; _x<width; _x++){
const iA = (x+_x)*4 + (y+_y)*this.cnv.width*4; // baseのxy座標とインデックス
const iB = (_x)*4 + (_y)*part.cnv.width*4; // partのxy座標とインデックス
// もしコマが透過ならpartのalphaチャネルでbaseのを上書き
if(d |
}
this.ctx.putImageData(imgdataA, 0, 0);
}
drawRegions(regions: SDT.SurfaceCollision[], description="notitle"): void {
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(description, 5, 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(description, 5, 10); // surfaceIdを描画
regions.forEach((col)=>{
this.drawRegion(col);
});
}
drawRegion(region: SDT.SurfaceCollision): void {
const {type="", name=""} = region;
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "#00FF00";
var left=0, top=0, right=0, bottom=0;
switch (type) {
case "rect":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionRect>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
this.ctx.beginPath();
this.ctx.rect(left, top, right - left, bottom - top);
this.ctx.stroke();
break;
case "ellipse":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionEllipse>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
// 実はctx.ellipseはfirefox対応してない
this.drawEllipseWithBezier(left, top, right - left, bottom - top);
break;
case "circle":
let {radius=0, centerX=0, centerY=0} = <SDT.SurfaceCollisionCircle>region;
centerX += this.srfCnv.basePosX;
centerY += this.srfCnv.basePosY;
left = centerX;
top = centerY;
this.ctx.beginPath();
this.ctx.arc(centerX, centerY, radius, 0, 2*Math.PI, true);
this.ctx.stroke();
break;
case "polygon":
const {coordinates=[]} = <SDT.SurfaceCollisionPolygon>region;
if(coordinates.length <= 0) break;
this.ctx.beginPath();
const {x:startX, y:startY} = coordinates[0];
left = startX;
top = startY;
this.ctx.moveTo(startX, startY);
for (let i=1; i<coordinates.length; i++){
const {x, y} = coordinates[i];
this.ctx.lineTo(x, y);
}
this.ctx.lineTo(startX, startY);
this.ctx.stroke();
break;
default:
console.warn("SurfaceRenderer#drawRegion", "unkown collision shape:", region);
break;
}
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(type + ":" + name, left + 5, top + 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(type + ":" + name, left + 5, top + 10);
}
// ctx.ellipseは非標準
drawEllipseWithBezier(x: number, y: number, w: number, h: number): void {
const kappa = .5522848,
ox = (w / 2) * kappa, // control point offset horizontal
oy = (h / 2) * kappa, // control point offset vertical
xe = x + w, // x-end
ye = y + h, // y-end
xm = x + w / 2, // x-middle
ym = y + h / 2; // y-middle
this.ctx.beginPath();
this.ctx.moveTo(x, ym);
this.ctx.bezierCurveTo(x, ym - oy, xm - ox, y, xm, y);
this.ctx.bezierCurveTo(xm + ox, y, xe, ym - oy, xe, ym);
this.ctx.bezierCurveTo(xe, ym + oy, xm + ox, ye, xm, ye);
this.ctx.bezierCurveTo(xm - ox, ye, x, ym + oy, x, ym);
this.ctx.stroke();
}
}
export function isHit(srfCnv: Canvas, sdef: SDT.SurfaceDefinition, x: number, y: number):{transparency: boolean, name: string}{
const transparency = Util.isHit(this.cnv, x, y);
const name = sdef.getRegion(x - this.basePosX, y - this.basePosY);
return {transparency, name};
}
| ataB[iB + 3] === 0) dataA[iA + 3] = dataB[iB + 3];
} | conditional_block |
Renderer.ts | /*
* surface -> canvas なレンダラ。
* HTMLCanvasElement もこの層で抽象化する
*/
import * as Util from "./Util";
import * as SDT from "ikagaka-shell-loader/lib/Model/SurfaceDefinitionTree";
import {Canvas, copy} from "./Canvas";
export class Renderer {
// GCの発生を抑えるためバッファを使いまわす
srfCnv: Canvas;
cnv: HTMLCanvasElement;
ctx: CanvasRenderingContext2D;
tmpcnv: HTMLCanvasElement
tmpctx: CanvasRenderingContext2D;
debug: boolean; // 基準点描画
use_self_alpha: boolean;
// 渡されたCanvasをベースサーフェスとしてレイヤー合成を開始する。
// nullならば1x1のCanvasをベースサーフェスとする。
// 渡されたCanvasは変更しない。
constructor(srfCnv?: Canvas) {
this.srfCnv = srfCnv == null ? new Canvas(Util.createCanvas()) : srfCnv;
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
this.tmpcnv = Util.createCanvas();
this.tmpctx = <CanvasRenderingContext2D>this.tmpcnv.getContext("2d");
this.use_self_alpha = false;
this.debug = false;
}
// バッファを使いまわすためのリセット
// clearは短形を保つがリセットは1x1になる
reset(): void {
// reshapeの機会を減らすため大きさはそのままにする
this.ctx.canvas.width = this.ctx.canvas.width;
this.tmpctx.canvas.width = this.tmpctx.canvas.width;
this.srfCnv.basePosX = 0;
this.srfCnv.basePosY = 0;
this.srfCnv.baseWidth = 0;
this.srfCnv.baseHeight = 0;
}
clear(): void {
this.ctx.clearRect(0, 0, this.ctx.canvas.width, this.ctx.canvas.height);
}
// [
// {canvas: srfCnv1, type: "base", x: 0, y: 0}
// {canvas: srfCnv2, type: "overlay", x: 50, y: 50}
// ]
composeElements(elms: {type: string, x: number, y: number, canvas: Canvas}[]): Canvas {
// baseを決定
const bases = elms.filter(({type})=> type === "base");
const others = elms.filter(({type})=> type !== "base");
// element[MAX].base > element0 > element[MIN]
if(bases.length === 0){
// element[MIN]
// elms.length > 0なのでundefinedにはならない…はず。
// お前がbaseになるんだよ
const base = <SDT.SurfaceElement&{canvas:Canvas}>others.shift();
if(base != null){
bases.push(base);
console.warn("SurfaceRenderer#composeElements: base surface not found. failback.", bases, others);
}else{
console.error("SurfaceRenderer#composeElements: cannot decide base surface.", base, others);
return this.srfCnv;
}
}
let base = bases.slice(-1)[0]; /* last */
this.base(base.canvas);
others.forEach(({canvas, type, x, y})=>{
this.composeElement(canvas, type, x, y);
});
return this.srfCnv;
}
composeElement(canvas: Canvas, type: string, x=0, y=0): void {
switch (type) {
case "overlay": this.overlay(canvas, x, y); break;
case "overlayfast": this.overlayfast(canvas, x, y); break;
case "replace": this.replace(canvas, x, y); break;
case "interpolate": this.interpolate(canvas, x, y); break;
case "reduce": this.reduce(canvas, x, y); break;
default:
console.warn("SurfaceRenderer#composeElement:", "unkown compose method", canvas, type, x, y);
}
}
rebase(srfCnv: Canvas){
this.srfCnv = srfCnv; // 描画対象を変える
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
}
init(srfCnv: Canvas){
// this を srfCnv の値で置き換え
this.base(srfCnv);
this.srfCnv.basePosX = srfCnv.basePosX;
this.srfCnv.basePosY = srfCnv.basePosY;
this.srfCnv.baseWidth = srfCnv.baseWidth;
this.srfCnv.baseHeight = srfCnv.baseHeight;
}
//下位レイヤをコマで完全に置き換える。collisionもコマのサーフェスに定義されたものに更新される。
//このメソッドのパターンを重ねると、サーフェス全面を描画し直すことによるアニメーション(いわばパラパラ漫画)が実現される。
//この描画メソッドが指定されたpattern定義では、XY座標は無視される。
//着せ替え・elementでも使用できる。
base(part: Canvas): void {
this.cnv.width = part.cnv.width;
this.cnv.height = part.cnv.height;
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, 0, 0);
}
//下位レイヤにコマを重ねる。
//着せ替え・elementでも使用できる。
overlay(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの非透過部分(半透明含む)にのみコマを重ねる。
//着せ替え・elementでも使用できる。
overlayfast(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-atop";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの透明なところにのみコマを重ねる。
//下位レイヤの半透明部分に対しても、透明度が高い部分ほど強くコマを合成する。
//interpolateで重なる部分はベースより上位(手前)側になければならない
//(interpolateのコマが描画している部分に、上位のレイヤで不透明な部分が重なると反映されなくなる)。
//着せ替え・elementでも使用できる。
interpolate(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "destination-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤにコマを重ねるが、コマの透過部分について下位レイヤにも反映する(reduce + overlayに近い)。
//着せ替え・elementでも使用できる。
replace(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.clearRect(this.srfCnv.basePosX + x, this.srfCnv.basePosY + y, part.cnv.width, part.cnv.height);
this.overlay(part, x, y);
}
prepareOverlay(part: Canvas, x: number, y: number): void {
// パーツがはみだす量
// もし負なら左へはみ出した量
let left = this.srfCnv.basePosX + x;
// もし負なら右へはみ出した量
let right = this.cnv.width - ((this.srfCnv.basePosX + x) + part.cnv.width);
// もし負なら上へはみ出した量
let top = this.srfCnv.basePosY + y;
// もし負なら↓へはみ出した量
let bottom = this.cnv.height - ((this.srfCnv.basePosY + y) + part.cnv.height);
if(left < 0 || right < 0 || top < 0 || bottom < 0){
// はみ出し発生
let offsetX = 0; // ずれた量
let offsetY = 0;
console.info("SurfaceRenderer# | hape
this.srfCnv.basePosX += (-left);
}
if(right<0){
this.cnv.width += (-right); // reshape
}
if(top<0){
offsetY = (-top);
this.cnv.height += (-top); // reshape
this.srfCnv.basePosY += (-top);
}
if(bottom<0){
this.cnv.height += (-bottom); // reshape
}
this.ctx.drawImage(this.tmpctx.canvas, offsetX, offsetY); //下位レイヤ再描画
}
if(this.debug){
// 基準点描画
this.ctx.fillStyle = "lime";
this.ctx.fillRect(this.srfCnv.basePosX, this.srfCnv.basePosY, 5, 5);
}
}
//下位レイヤの抜き色による透過領域に、そのコマの抜き色による透過領域を追加する。コマの抜き色で無い部分は無視される。
//着せ替え用に用意されたメソッドだが、着せ替えでないアニメーション・elementでも使用可能。
//http://usada.sakura.vg/contents/seriko.html
reduce(part: Canvas, x: number, y: number): void {
// はみ出しちぇっく prepareOverlay はしない
const width = x + part.cnv.width < this.cnv.width ? part.cnv.width : this.cnv.width - x;
const height = y + part.cnv.height < this.cnv.height ? part.cnv.height : this.cnv.height - y;
const imgdataA = this.ctx.getImageData(0, 0, this.cnv.width, this.cnv.height);
const dataA = imgdataA.data;
// partの透明領域までアクセスする必要がある
const ctxB = <CanvasRenderingContext2D>part.cnv.getContext("2d");
const imgdataB = ctxB.getImageData(0, 0, part.cnv.width, part.cnv.height)
const dataB = imgdataB.data;
for(let _y=0; _y<height; _y++){
for(let _x=0; _x<width; _x++){
const iA = (x+_x)*4 + (y+_y)*this.cnv.width*4; // baseのxy座標とインデックス
const iB = (_x)*4 + (_y)*part.cnv.width*4; // partのxy座標とインデックス
// もしコマが透過ならpartのalphaチャネルでbaseのを上書き
if(dataB[iB + 3] === 0) dataA[iA + 3] = dataB[iB + 3];
}
}
this.ctx.putImageData(imgdataA, 0, 0);
}
drawRegions(regions: SDT.SurfaceCollision[], description="notitle"): void {
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(description, 5, 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(description, 5, 10); // surfaceIdを描画
regions.forEach((col)=>{
this.drawRegion(col);
});
}
drawRegion(region: SDT.SurfaceCollision): void {
const {type="", name=""} = region;
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "#00FF00";
var left=0, top=0, right=0, bottom=0;
switch (type) {
case "rect":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionRect>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
this.ctx.beginPath();
this.ctx.rect(left, top, right - left, bottom - top);
this.ctx.stroke();
break;
case "ellipse":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionEllipse>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
// 実はctx.ellipseはfirefox対応してない
this.drawEllipseWithBezier(left, top, right - left, bottom - top);
break;
case "circle":
let {radius=0, centerX=0, centerY=0} = <SDT.SurfaceCollisionCircle>region;
centerX += this.srfCnv.basePosX;
centerY += this.srfCnv.basePosY;
left = centerX;
top = centerY;
this.ctx.beginPath();
this.ctx.arc(centerX, centerY, radius, 0, 2*Math.PI, true);
this.ctx.stroke();
break;
case "polygon":
const {coordinates=[]} = <SDT.SurfaceCollisionPolygon>region;
if(coordinates.length <= 0) break;
this.ctx.beginPath();
const {x:startX, y:startY} = coordinates[0];
left = startX;
top = startY;
this.ctx.moveTo(startX, startY);
for (let i=1; i<coordinates.length; i++){
const {x, y} = coordinates[i];
this.ctx.lineTo(x, y);
}
this.ctx.lineTo(startX, startY);
this.ctx.stroke();
break;
default:
console.warn("SurfaceRenderer#drawRegion", "unkown collision shape:", region);
break;
}
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(type + ":" + name, left + 5, top + 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(type + ":" + name, left + 5, top + 10);
}
// ctx.ellipseは非標準
drawEllipseWithBezier(x: number, y: number, w: number, h: number): void {
const kappa = .5522848,
ox = (w / 2) * kappa, // control point offset horizontal
oy = (h / 2) * kappa, // control point offset vertical
xe = x + w, // x-end
ye = y + h, // y-end
xm = x + w / 2, // x-middle
ym = y + h / 2; // y-middle
this.ctx.beginPath();
this.ctx.moveTo(x, ym);
this.ctx.bezierCurveTo(x, ym - oy, xm - ox, y, xm, y);
this.ctx.bezierCurveTo(xm + ox, y, xe, ym - oy, xe, ym);
this.ctx.bezierCurveTo(xe, ym + oy, xm + ox, ye, xm, ye);
this.ctx.bezierCurveTo(xm - ox, ye, x, ym + oy, x, ym);
this.ctx.stroke();
}
}
export function isHit(srfCnv: Canvas, sdef: SDT.SurfaceDefinition, x: number, y: number):{transparency: boolean, name: string}{
const transparency = Util.isHit(this.cnv, x, y);
const name = sdef.getRegion(x - this.basePosX, y - this.basePosY);
return {transparency, name};
}
| prepareOverlay: reshape occured");
// 現状をtmpcnvへコピー
Util.fastcopy(this.cnv, this.tmpctx);
if(left<0){
offsetX = (-left);
this.cnv.width += (-left); // res | identifier_body |
Renderer.ts | /*
* surface -> canvas なレンダラ。
* HTMLCanvasElement もこの層で抽象化する
*/
import * as Util from "./Util";
import * as SDT from "ikagaka-shell-loader/lib/Model/SurfaceDefinitionTree";
import {Canvas, copy} from "./Canvas";
export class Renderer {
// GCの発生を抑えるためバッファを使いまわす
srfCnv: Canvas;
cnv: HTMLCanvasElement;
ctx: CanvasRenderingContext2D;
tmpcnv: HTMLCanvasElement
tmpctx: CanvasRenderingContext2D;
debug: boolean; // 基準点描画
use_self_alpha: boolean;
// 渡されたCanvasをベースサーフェスとしてレイヤー合成を開始する。
// nullならば1x1のCanvasをベースサーフェスとする。
// 渡されたCanvasは変更しない。
constructor(srfCnv?: Canvas) {
this.srfCnv = srfCnv == null ? new Canvas(Util.createCanvas()) : srfCnv;
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
this.tmpcnv = Util.createCanvas();
this.tmpctx = <CanvasRenderingContext2D>this.tmpcnv.getContext("2d");
this.use_self_alpha = false;
this.debug = false;
}
// バッファを使いまわすためのリセット
// clearは短形を保つがリセットは1x1になる
reset(): void {
// reshapeの機会を減らすため大きさはそのままにする
this.ctx.canvas.width = this.ctx.canvas.width;
this.tmpctx.canvas.width = this.tmpctx.canvas.width;
this.srfCnv.basePosX = 0;
this.srfCnv.basePosY = 0;
this.srfCnv.baseWidth = 0;
this.srfCnv.baseHeight = 0;
}
clear(): void {
this.ctx.clearRect(0, 0, this.ctx.canvas.width, this.ctx.canvas.height);
}
// [
// {canvas: srfCnv1, type: "base", x: 0, y: 0}
// {canvas: srfCnv2, type: "overlay", x: 50, y: 50}
// ]
composeElements(elms: {type: string, x: number, y: number, canvas: Canvas | Canvas {
// baseを決定
const bases = elms.filter(({type})=> type === "base");
const others = elms.filter(({type})=> type !== "base");
// element[MAX].base > element0 > element[MIN]
if(bases.length === 0){
// element[MIN]
// elms.length > 0なのでundefinedにはならない…はず。
// お前がbaseになるんだよ
const base = <SDT.SurfaceElement&{canvas:Canvas}>others.shift();
if(base != null){
bases.push(base);
console.warn("SurfaceRenderer#composeElements: base surface not found. failback.", bases, others);
}else{
console.error("SurfaceRenderer#composeElements: cannot decide base surface.", base, others);
return this.srfCnv;
}
}
let base = bases.slice(-1)[0]; /* last */
this.base(base.canvas);
others.forEach(({canvas, type, x, y})=>{
this.composeElement(canvas, type, x, y);
});
return this.srfCnv;
}
composeElement(canvas: Canvas, type: string, x=0, y=0): void {
switch (type) {
case "overlay": this.overlay(canvas, x, y); break;
case "overlayfast": this.overlayfast(canvas, x, y); break;
case "replace": this.replace(canvas, x, y); break;
case "interpolate": this.interpolate(canvas, x, y); break;
case "reduce": this.reduce(canvas, x, y); break;
default:
console.warn("SurfaceRenderer#composeElement:", "unkown compose method", canvas, type, x, y);
}
}
rebase(srfCnv: Canvas){
this.srfCnv = srfCnv; // 描画対象を変える
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
}
init(srfCnv: Canvas){
// this を srfCnv の値で置き換え
this.base(srfCnv);
this.srfCnv.basePosX = srfCnv.basePosX;
this.srfCnv.basePosY = srfCnv.basePosY;
this.srfCnv.baseWidth = srfCnv.baseWidth;
this.srfCnv.baseHeight = srfCnv.baseHeight;
}
//下位レイヤをコマで完全に置き換える。collisionもコマのサーフェスに定義されたものに更新される。
//このメソッドのパターンを重ねると、サーフェス全面を描画し直すことによるアニメーション(いわばパラパラ漫画)が実現される。
//この描画メソッドが指定されたpattern定義では、XY座標は無視される。
//着せ替え・elementでも使用できる。
base(part: Canvas): void {
this.cnv.width = part.cnv.width;
this.cnv.height = part.cnv.height;
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, 0, 0);
}
//下位レイヤにコマを重ねる。
//着せ替え・elementでも使用できる。
overlay(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの非透過部分(半透明含む)にのみコマを重ねる。
//着せ替え・elementでも使用できる。
overlayfast(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-atop";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの透明なところにのみコマを重ねる。
//下位レイヤの半透明部分に対しても、透明度が高い部分ほど強くコマを合成する。
//interpolateで重なる部分はベースより上位(手前)側になければならない
//(interpolateのコマが描画している部分に、上位のレイヤで不透明な部分が重なると反映されなくなる)。
//着せ替え・elementでも使用できる。
interpolate(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "destination-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤにコマを重ねるが、コマの透過部分について下位レイヤにも反映する(reduce + overlayに近い)。
//着せ替え・elementでも使用できる。
replace(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.clearRect(this.srfCnv.basePosX + x, this.srfCnv.basePosY + y, part.cnv.width, part.cnv.height);
this.overlay(part, x, y);
}
prepareOverlay(part: Canvas, x: number, y: number): void {
// パーツがはみだす量
// もし負なら左へはみ出した量
let left = this.srfCnv.basePosX + x;
// もし負なら右へはみ出した量
let right = this.cnv.width - ((this.srfCnv.basePosX + x) + part.cnv.width);
// もし負なら上へはみ出した量
let top = this.srfCnv.basePosY + y;
// もし負なら↓へはみ出した量
let bottom = this.cnv.height - ((this.srfCnv.basePosY + y) + part.cnv.height);
if(left < 0 || right < 0 || top < 0 || bottom < 0){
// はみ出し発生
let offsetX = 0; // ずれた量
let offsetY = 0;
console.info("SurfaceRenderer#prepareOverlay: reshape occured");
// 現状をtmpcnvへコピー
Util.fastcopy(this.cnv, this.tmpctx);
if(left<0){
offsetX = (-left);
this.cnv.width += (-left); // reshape
this.srfCnv.basePosX += (-left);
}
if(right<0){
this.cnv.width += (-right); // reshape
}
if(top<0){
offsetY = (-top);
this.cnv.height += (-top); // reshape
this.srfCnv.basePosY += (-top);
}
if(bottom<0){
this.cnv.height += (-bottom); // reshape
}
this.ctx.drawImage(this.tmpctx.canvas, offsetX, offsetY); //下位レイヤ再描画
}
if(this.debug){
// 基準点描画
this.ctx.fillStyle = "lime";
this.ctx.fillRect(this.srfCnv.basePosX, this.srfCnv.basePosY, 5, 5);
}
}
//下位レイヤの抜き色による透過領域に、そのコマの抜き色による透過領域を追加する。コマの抜き色で無い部分は無視される。
//着せ替え用に用意されたメソッドだが、着せ替えでないアニメーション・elementでも使用可能。
//http://usada.sakura.vg/contents/seriko.html
reduce(part: Canvas, x: number, y: number): void {
// はみ出しちぇっく prepareOverlay はしない
const width = x + part.cnv.width < this.cnv.width ? part.cnv.width : this.cnv.width - x;
const height = y + part.cnv.height < this.cnv.height ? part.cnv.height : this.cnv.height - y;
const imgdataA = this.ctx.getImageData(0, 0, this.cnv.width, this.cnv.height);
const dataA = imgdataA.data;
// partの透明領域までアクセスする必要がある
const ctxB = <CanvasRenderingContext2D>part.cnv.getContext("2d");
const imgdataB = ctxB.getImageData(0, 0, part.cnv.width, part.cnv.height)
const dataB = imgdataB.data;
for(let _y=0; _y<height; _y++){
for(let _x=0; _x<width; _x++){
const iA = (x+_x)*4 + (y+_y)*this.cnv.width*4; // baseのxy座標とインデックス
const iB = (_x)*4 + (_y)*part.cnv.width*4; // partのxy座標とインデックス
// もしコマが透過ならpartのalphaチャネルでbaseのを上書き
if(dataB[iB + 3] === 0) dataA[iA + 3] = dataB[iB + 3];
}
}
this.ctx.putImageData(imgdataA, 0, 0);
}
drawRegions(regions: SDT.SurfaceCollision[], description="notitle"): void {
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(description, 5, 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(description, 5, 10); // surfaceIdを描画
regions.forEach((col)=>{
this.drawRegion(col);
});
}
drawRegion(region: SDT.SurfaceCollision): void {
const {type="", name=""} = region;
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "#00FF00";
var left=0, top=0, right=0, bottom=0;
switch (type) {
case "rect":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionRect>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
this.ctx.beginPath();
this.ctx.rect(left, top, right - left, bottom - top);
this.ctx.stroke();
break;
case "ellipse":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionEllipse>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
// 実はctx.ellipseはfirefox対応してない
this.drawEllipseWithBezier(left, top, right - left, bottom - top);
break;
case "circle":
let {radius=0, centerX=0, centerY=0} = <SDT.SurfaceCollisionCircle>region;
centerX += this.srfCnv.basePosX;
centerY += this.srfCnv.basePosY;
left = centerX;
top = centerY;
this.ctx.beginPath();
this.ctx.arc(centerX, centerY, radius, 0, 2*Math.PI, true);
this.ctx.stroke();
break;
case "polygon":
const {coordinates=[]} = <SDT.SurfaceCollisionPolygon>region;
if(coordinates.length <= 0) break;
this.ctx.beginPath();
const {x:startX, y:startY} = coordinates[0];
left = startX;
top = startY;
this.ctx.moveTo(startX, startY);
for (let i=1; i<coordinates.length; i++){
const {x, y} = coordinates[i];
this.ctx.lineTo(x, y);
}
this.ctx.lineTo(startX, startY);
this.ctx.stroke();
break;
default:
console.warn("SurfaceRenderer#drawRegion", "unkown collision shape:", region);
break;
}
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(type + ":" + name, left + 5, top + 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(type + ":" + name, left + 5, top + 10);
}
// ctx.ellipseは非標準
drawEllipseWithBezier(x: number, y: number, w: number, h: number): void {
const kappa = .5522848,
ox = (w / 2) * kappa, // control point offset horizontal
oy = (h / 2) * kappa, // control point offset vertical
xe = x + w, // x-end
ye = y + h, // y-end
xm = x + w / 2, // x-middle
ym = y + h / 2; // y-middle
this.ctx.beginPath();
this.ctx.moveTo(x, ym);
this.ctx.bezierCurveTo(x, ym - oy, xm - ox, y, xm, y);
this.ctx.bezierCurveTo(xm + ox, y, xe, ym - oy, xe, ym);
this.ctx.bezierCurveTo(xe, ym + oy, xm + ox, ye, xm, ye);
this.ctx.bezierCurveTo(xm - ox, ye, x, ym + oy, x, ym);
this.ctx.stroke();
}
}
export function isHit(srfCnv: Canvas, sdef: SDT.SurfaceDefinition, x: number, y: number):{transparency: boolean, name: string}{
const transparency = Util.isHit(this.cnv, x, y);
const name = sdef.getRegion(x - this.basePosX, y - this.basePosY);
return {transparency, name};
}
| }[]): | identifier_name |
Renderer.ts | /*
* surface -> canvas なレンダラ。
* HTMLCanvasElement もこの層で抽象化する
*/
import * as Util from "./Util";
import * as SDT from "ikagaka-shell-loader/lib/Model/SurfaceDefinitionTree";
import {Canvas, copy} from "./Canvas";
export class Renderer {
// GCの発生を抑えるためバッファを使いまわす
srfCnv: Canvas;
cnv: HTMLCanvasElement;
ctx: CanvasRenderingContext2D;
tmpcnv: HTMLCanvasElement
tmpctx: CanvasRenderingContext2D;
debug: boolean; // 基準点描画
use_self_alpha: boolean;
// 渡されたCanvasをベースサーフェスとしてレイヤー合成を開始する。
// nullならば1x1のCanvasをベースサーフェスとする。
// 渡されたCanvasは変更しない。
constructor(srfCnv?: Canvas) {
this.srfCnv = srfCnv == null ? new Canvas(Util.createCanvas()) : srfCnv;
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
this.tmpcnv = Util.createCanvas();
this.tmpctx = <CanvasRenderingContext2D>this.tmpcnv.getContext("2d");
this.use_self_alpha = false;
this.debug = false;
}
// バッファを使いまわすためのリセット
// clearは短形を保つがリセットは1x1になる
reset(): void {
// reshapeの機会を減らすため大きさはそのままにする
this.ctx.canvas.width = this.ctx.canvas.width;
this.tmpctx.canvas.width = this.tmpctx.canvas.width;
this.srfCnv.basePosX = 0;
this.srfCnv.basePosY = 0;
this.srfCnv.baseWidth = 0;
this.srfCnv.baseHeight = 0;
}
clear(): void {
this.ctx.clearRect(0, 0, this.ctx.canvas.width, this.ctx.canvas.height);
}
// [
// {canvas: srfCnv1, type: "base", x: 0, y: 0}
// {canvas: srfCnv2, type: "overlay", x: 50, y: 50}
// ]
composeElements(elms: {type: string, x: number, y: number, canvas: Canvas}[]): Canvas {
// baseを決定
const bases = elms.filter(({type})=> type === "base");
const others = elms.filter(({type})=> type !== "base");
// element[MAX].base > element0 > element[MIN]
if(bases.length === 0){
// element[MIN]
// elms.length > 0なのでundefinedにはならない…はず。
// お前がbaseになるんだよ
const base = <SDT.SurfaceElement&{canvas:Canvas}>others.shift();
if(base != null){
bases.push(base); | }
}
let base = bases.slice(-1)[0]; /* last */
this.base(base.canvas);
others.forEach(({canvas, type, x, y})=>{
this.composeElement(canvas, type, x, y);
});
return this.srfCnv;
}
composeElement(canvas: Canvas, type: string, x=0, y=0): void {
switch (type) {
case "overlay": this.overlay(canvas, x, y); break;
case "overlayfast": this.overlayfast(canvas, x, y); break;
case "replace": this.replace(canvas, x, y); break;
case "interpolate": this.interpolate(canvas, x, y); break;
case "reduce": this.reduce(canvas, x, y); break;
default:
console.warn("SurfaceRenderer#composeElement:", "unkown compose method", canvas, type, x, y);
}
}
rebase(srfCnv: Canvas){
this.srfCnv = srfCnv; // 描画対象を変える
this.cnv = this.srfCnv.cnv;
this.ctx = <CanvasRenderingContext2D>this.cnv.getContext("2d");
}
init(srfCnv: Canvas){
// this を srfCnv の値で置き換え
this.base(srfCnv);
this.srfCnv.basePosX = srfCnv.basePosX;
this.srfCnv.basePosY = srfCnv.basePosY;
this.srfCnv.baseWidth = srfCnv.baseWidth;
this.srfCnv.baseHeight = srfCnv.baseHeight;
}
//下位レイヤをコマで完全に置き換える。collisionもコマのサーフェスに定義されたものに更新される。
//このメソッドのパターンを重ねると、サーフェス全面を描画し直すことによるアニメーション(いわばパラパラ漫画)が実現される。
//この描画メソッドが指定されたpattern定義では、XY座標は無視される。
//着せ替え・elementでも使用できる。
base(part: Canvas): void {
this.cnv.width = part.cnv.width;
this.cnv.height = part.cnv.height;
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, 0, 0);
}
//下位レイヤにコマを重ねる。
//着せ替え・elementでも使用できる。
overlay(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの非透過部分(半透明含む)にのみコマを重ねる。
//着せ替え・elementでも使用できる。
overlayfast(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "source-atop";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤの透明なところにのみコマを重ねる。
//下位レイヤの半透明部分に対しても、透明度が高い部分ほど強くコマを合成する。
//interpolateで重なる部分はベースより上位(手前)側になければならない
//(interpolateのコマが描画している部分に、上位のレイヤで不透明な部分が重なると反映されなくなる)。
//着せ替え・elementでも使用できる。
interpolate(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.globalCompositeOperation = "destination-over";
this.ctx.drawImage(part.cnv, this.srfCnv.basePosX + x, this.srfCnv.basePosY + y);
}
//下位レイヤにコマを重ねるが、コマの透過部分について下位レイヤにも反映する(reduce + overlayに近い)。
//着せ替え・elementでも使用できる。
replace(part: Canvas, x: number, y: number): void {
this.prepareOverlay(part, x, y);
this.ctx.clearRect(this.srfCnv.basePosX + x, this.srfCnv.basePosY + y, part.cnv.width, part.cnv.height);
this.overlay(part, x, y);
}
prepareOverlay(part: Canvas, x: number, y: number): void {
// パーツがはみだす量
// もし負なら左へはみ出した量
let left = this.srfCnv.basePosX + x;
// もし負なら右へはみ出した量
let right = this.cnv.width - ((this.srfCnv.basePosX + x) + part.cnv.width);
// もし負なら上へはみ出した量
let top = this.srfCnv.basePosY + y;
// もし負なら↓へはみ出した量
let bottom = this.cnv.height - ((this.srfCnv.basePosY + y) + part.cnv.height);
if(left < 0 || right < 0 || top < 0 || bottom < 0){
// はみ出し発生
let offsetX = 0; // ずれた量
let offsetY = 0;
console.info("SurfaceRenderer#prepareOverlay: reshape occured");
// 現状をtmpcnvへコピー
Util.fastcopy(this.cnv, this.tmpctx);
if(left<0){
offsetX = (-left);
this.cnv.width += (-left); // reshape
this.srfCnv.basePosX += (-left);
}
if(right<0){
this.cnv.width += (-right); // reshape
}
if(top<0){
offsetY = (-top);
this.cnv.height += (-top); // reshape
this.srfCnv.basePosY += (-top);
}
if(bottom<0){
this.cnv.height += (-bottom); // reshape
}
this.ctx.drawImage(this.tmpctx.canvas, offsetX, offsetY); //下位レイヤ再描画
}
if(this.debug){
// 基準点描画
this.ctx.fillStyle = "lime";
this.ctx.fillRect(this.srfCnv.basePosX, this.srfCnv.basePosY, 5, 5);
}
}
//下位レイヤの抜き色による透過領域に、そのコマの抜き色による透過領域を追加する。コマの抜き色で無い部分は無視される。
//着せ替え用に用意されたメソッドだが、着せ替えでないアニメーション・elementでも使用可能。
//http://usada.sakura.vg/contents/seriko.html
reduce(part: Canvas, x: number, y: number): void {
// はみ出しちぇっく prepareOverlay はしない
const width = x + part.cnv.width < this.cnv.width ? part.cnv.width : this.cnv.width - x;
const height = y + part.cnv.height < this.cnv.height ? part.cnv.height : this.cnv.height - y;
const imgdataA = this.ctx.getImageData(0, 0, this.cnv.width, this.cnv.height);
const dataA = imgdataA.data;
// partの透明領域までアクセスする必要がある
const ctxB = <CanvasRenderingContext2D>part.cnv.getContext("2d");
const imgdataB = ctxB.getImageData(0, 0, part.cnv.width, part.cnv.height)
const dataB = imgdataB.data;
for(let _y=0; _y<height; _y++){
for(let _x=0; _x<width; _x++){
const iA = (x+_x)*4 + (y+_y)*this.cnv.width*4; // baseのxy座標とインデックス
const iB = (_x)*4 + (_y)*part.cnv.width*4; // partのxy座標とインデックス
// もしコマが透過ならpartのalphaチャネルでbaseのを上書き
if(dataB[iB + 3] === 0) dataA[iA + 3] = dataB[iB + 3];
}
}
this.ctx.putImageData(imgdataA, 0, 0);
}
drawRegions(regions: SDT.SurfaceCollision[], description="notitle"): void {
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(description, 5, 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(description, 5, 10); // surfaceIdを描画
regions.forEach((col)=>{
this.drawRegion(col);
});
}
drawRegion(region: SDT.SurfaceCollision): void {
const {type="", name=""} = region;
this.ctx.lineWidth = 1;
this.ctx.strokeStyle = "#00FF00";
var left=0, top=0, right=0, bottom=0;
switch (type) {
case "rect":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionRect>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
this.ctx.beginPath();
this.ctx.rect(left, top, right - left, bottom - top);
this.ctx.stroke();
break;
case "ellipse":
var {left=0, top=0, right=0, bottom=0} = <SDT.SurfaceCollisionEllipse>region;
left += this.srfCnv.basePosX;
top += this.srfCnv.basePosY;
right += this.srfCnv.basePosX;
bottom += this.srfCnv.basePosY;
// 実はctx.ellipseはfirefox対応してない
this.drawEllipseWithBezier(left, top, right - left, bottom - top);
break;
case "circle":
let {radius=0, centerX=0, centerY=0} = <SDT.SurfaceCollisionCircle>region;
centerX += this.srfCnv.basePosX;
centerY += this.srfCnv.basePosY;
left = centerX;
top = centerY;
this.ctx.beginPath();
this.ctx.arc(centerX, centerY, radius, 0, 2*Math.PI, true);
this.ctx.stroke();
break;
case "polygon":
const {coordinates=[]} = <SDT.SurfaceCollisionPolygon>region;
if(coordinates.length <= 0) break;
this.ctx.beginPath();
const {x:startX, y:startY} = coordinates[0];
left = startX;
top = startY;
this.ctx.moveTo(startX, startY);
for (let i=1; i<coordinates.length; i++){
const {x, y} = coordinates[i];
this.ctx.lineTo(x, y);
}
this.ctx.lineTo(startX, startY);
this.ctx.stroke();
break;
default:
console.warn("SurfaceRenderer#drawRegion", "unkown collision shape:", region);
break;
}
this.ctx.font = "35px";
this.ctx.lineWidth = 4;
this.ctx.strokeStyle = "white";
this.ctx.strokeText(type + ":" + name, left + 5, top + 10);
this.ctx.fillStyle = "black";
this.ctx.fillText(type + ":" + name, left + 5, top + 10);
}
// ctx.ellipseは非標準
drawEllipseWithBezier(x: number, y: number, w: number, h: number): void {
const kappa = .5522848,
ox = (w / 2) * kappa, // control point offset horizontal
oy = (h / 2) * kappa, // control point offset vertical
xe = x + w, // x-end
ye = y + h, // y-end
xm = x + w / 2, // x-middle
ym = y + h / 2; // y-middle
this.ctx.beginPath();
this.ctx.moveTo(x, ym);
this.ctx.bezierCurveTo(x, ym - oy, xm - ox, y, xm, y);
this.ctx.bezierCurveTo(xm + ox, y, xe, ym - oy, xe, ym);
this.ctx.bezierCurveTo(xe, ym + oy, xm + ox, ye, xm, ye);
this.ctx.bezierCurveTo(xm - ox, ye, x, ym + oy, x, ym);
this.ctx.stroke();
}
}
export function isHit(srfCnv: Canvas, sdef: SDT.SurfaceDefinition, x: number, y: number):{transparency: boolean, name: string}{
const transparency = Util.isHit(this.cnv, x, y);
const name = sdef.getRegion(x - this.basePosX, y - this.basePosY);
return {transparency, name};
} | console.warn("SurfaceRenderer#composeElements: base surface not found. failback.", bases, others);
}else{
console.error("SurfaceRenderer#composeElements: cannot decide base surface.", base, others);
return this.srfCnv; | random_line_split |
crypto_box.rs | use crate::internal::rayon::rayon_exec;
use crate::internal::x25519;
use block_padding::Padding;
use crypto_box as lib_crypto_box;
use std::sync::Arc;
/// Length of the crypto box aead nonce.
/// Ideally this would be exposed from upstream but I didn't see a good way to get at it directly.
pub const NONCE_BYTES: usize = 24;
/// The size of blocks to pad encrypted data to.
/// We have no idea how big incoming data is, but probably it is generally smallish.
/// Devs can always do their own padding on top of this, but we want some safety for unpadded data.
/// Libsodium optionally supports ISO 7816-4 padding algorithm.
/// @see https://doc.libsodium.org/padding#algorithm
pub const BLOCK_PADDING_SIZE: usize = 32;
/// The delimiter for padding as per ISO 7816-4.
pub const BLOCK_PADDING_DELIMITER: u8 = 0x80;
/// Newtype for the nonce for safety.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxNonce([u8; NONCE_BYTES]);
impl CryptoBoxNonce {
async fn new_random() -> Self {
rayon_exec(move || {
let mut rng = rand::thread_rng();
let mut bytes = [0; NONCE_BYTES];
// We rely on the lib_crypto_box nonce length being the same as what we expect.
// Should be a reasonably safe bet as 24 bytes is dictated by the crypto_box algorithm.
bytes.copy_from_slice(
lib_crypto_box::generate_nonce(&mut rng).as_slice(),
);
Self(bytes)
})
.await
}
}
impl AsRef<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8; NONCE_BYTES] {
&self.0
}
}
impl AsRef<[u8]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl From<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn from(array: [u8; NONCE_BYTES]) -> Self {
Self(array)
}
}
impl std::convert::TryFrom<&[u8]> for CryptoBoxNonce {
type Error = crate::error::LairError;
fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
if slice.len() == NONCE_BYTES {
let mut inner = [0; NONCE_BYTES];
inner.copy_from_slice(slice);
Ok(Self(inner))
} else {
Err(crate::error::LairError::CryptoBoxNonceLength)
}
}
}
impl CryptoBoxNonce {
/// Always NONCE_BYTES.
pub fn len(&self) -> usize {
NONCE_BYTES
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
/// "Additional associated data" as per the aead rust crate Payload.
/// May be empty. Must be valid if present.
pub struct CryptoBoxAad(Vec<u8>);
/// The nonce and encrypted data together.
/// @todo include additional associated data?
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxEncryptedData {
/// The nonce generated during encryption.
/// We never allow nonce to be set externally so we need to return it.
pub nonce: CryptoBoxNonce,
/// The encrypted version of our input data.
#[allow(clippy::rc_buffer)]
pub encrypted_data: Arc<Vec<u8>>,
}
/// Data to be encrypted.
/// Not associated with a nonce because we enforce random nonces.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxData {
/// Data to be encrypted.
#[allow(clippy::rc_buffer)]
pub data: Arc<Vec<u8>>,
}
impl AsRef<[u8]> for CryptoBoxData {
fn as_ref(&self) -> &[u8] {
self.data.as_ref()
}
}
impl CryptoBoxData {
/// Length of newtype is length of inner.
pub fn len(&self) -> usize {
AsRef::<[u8]>::as_ref(self).len()
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl From<Vec<u8>> for CryptoBoxData {
fn from(v: Vec<u8>) -> Self {
Self { data: Arc::new(v) }
}
}
/// @todo all of this can be opened up to be more flexible over time.
/// Eventually all possible input such as nonces and associated data should be settable by the
/// external interface.
/// In the short term everyone is getting their heads around the 80/20 usage patterns that are as
/// safe as we can possibly make them to avoid subtleties that lead to nonce or key re-use etc.
///
/// Wrapper around crypto_box from whatever lib we use.
/// No BYO nonces. Nonces always random and returned as part of `CryptoBoxEncryptedData`.
/// No BYO algorithms (cipher agility). Algorithm always X25519XSalsa20Poly1305.
/// Currently no additional associated data but DNA space may be included in the future.
/// The sender's private key encrypts _for_ the recipient's pubkey.
///
/// FYI allowing nonces could be dangerous as it's exposed as a general purpose authenticated
/// encryption mechanism (or will be) via. crypto_box from libsodium.
/// The main thing is that if a secret/nonce combination is _ever_ used more than once it
/// completely breaks encryption.
//
/// Example ways a nonce could accidentally be reused:
/// - If two DNAs are the same or similar (e.g. cloned DNAs) then they will have the same
/// nonce generation logic, so may create collisions when run in parallel.
/// - Collision of initialization vectors in a key exchange/crypto session.
/// - Use of a counter based nonce in a way that isn't 100% reliably incrementing.
///
/// Example ways a secret could accidentally be reused:
/// - If two agents both commit their pubkeys then share them with each other, then the same
/// shared key will be 'negotiated' by x25519 ECDH every time it is called.
/// - If a pubkey is used across two different DNAs the secrets will collide at the lair
/// and the DNAs won't have a way to co-ordinate or detect this.
///
/// E.g. Ring is very wary of secret key re-use e.g. it makes explicit the use-case where an
/// ephemeral (single use) key is generated to establish an ephemeral (single use) shared
/// key. Our use-case is the libsodium `crypto_box` function that uses an x25519 keypair to
/// perform authenticated encryption, so it makes more sense for us to be storing our
/// private keys for later use BUT see above for the dangers of key re-use that the app dev
/// really needs to be wary of.
///
/// @see https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
pub async fn crypto_box(
sender: x25519::X25519PrivKey,
recipient: x25519::X25519PubKey,
data: Arc<CryptoBoxData>,
) -> crate::error::LairResult<CryptoBoxEncryptedData> {
let nonce = CryptoBoxNonce::new_random().await;
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let sender_box =
lib_crypto_box::SalsaBox::new(recipient.as_ref(), sender.as_ref());
// It's actually easier and clearer to directly pad the vector than use the block_padding
// crate, as that is optimised for blocks.
let mut to_encrypt = data.data.to_vec();
let padding_delimiter = vec![BLOCK_PADDING_DELIMITER];
let padding = vec![
0x0;
BLOCK_PADDING_SIZE
- (data.data.len() + 1) % BLOCK_PADDING_SIZE
];
to_encrypt.extend(padding_delimiter);
to_encrypt.extend(padding);
let encrypted_data = Arc::new(sender_box.encrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&nonce).into(),
to_encrypt.as_slice(),
)?);
// @todo do we want associated data to enforce the originating DHT space?
// https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
Ok(CryptoBoxEncryptedData {
nonce,
encrypted_data,
})
})
.await
}
/// Wrapper around crypto_box_open from whatever lib we use.
/// Exact inverse of `crypto_box_open` so nonce must be provided in `CryptoBoxEncryptedData`.
/// The recipient's private key encrypts _from_ the sender's pubkey.
pub async fn crypto_box_open(
recipient: x25519::X25519PrivKey,
sender: x25519::X25519PubKey,
encrypted_data: Arc<CryptoBoxEncryptedData>,
) -> crate::error::LairResult<Option<CryptoBoxData>> {
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let recipient_box =
lib_crypto_box::SalsaBox::new(sender.as_ref(), recipient.as_ref());
match recipient_box.decrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&encrypted_data.nonce).into(),
encrypted_data.encrypted_data.as_slice(),
) {
Ok(decrypted_data) => |
Err(_) => Ok(None),
}
})
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test(flavor = "multi_thread")]
async fn it_can_encrypt_and_decrypt() {
for input in [
// Empty vec.
vec![],
// Small vec.
vec![0],
vec![0, 1, 2],
vec![0, 1, 2, 3],
// Vec ending in padding delimiter.
vec![0x80],
vec![0, 0x80],
vec![0x80; BLOCK_PADDING_SIZE - 1],
vec![0x80; BLOCK_PADDING_SIZE],
vec![0x80; BLOCK_PADDING_SIZE + 1],
// Larger vec.
vec![0; BLOCK_PADDING_SIZE - 1],
vec![0; BLOCK_PADDING_SIZE],
vec![0; BLOCK_PADDING_SIZE + 1],
vec![0; BLOCK_PADDING_SIZE * 2 - 1],
vec![0; BLOCK_PADDING_SIZE * 2],
vec![0; BLOCK_PADDING_SIZE * 2 + 1],
]
.iter()
{
// Fresh keys.
let alice =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let bob =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let data = CryptoBoxData {
data: Arc::new(input.to_vec()),
};
// from alice to bob.
let encrypted_data = super::crypto_box(
alice.priv_key,
bob.pub_key,
Arc::new(data.clone()),
)
.await
.unwrap();
// The length excluding the 16 byte overhead should always be a multiple of 32 as this
// is our padding.
assert_eq!((encrypted_data.encrypted_data.len() - 16) % 32, 0);
let decrypted_data = super::crypto_box_open(
bob.priv_key,
alice.pub_key,
Arc::new(encrypted_data),
)
.await
.unwrap();
// If we can decrypt we managed to pad and unpad as well as encrypt and decrypt.
assert_eq!(&decrypted_data, &Some(data));
}
}
}
| {
match block_padding::Iso7816::unpad(&decrypted_data) {
// @todo do we want associated data to enforce the originating DHT space?
Ok(unpadded) => Ok(Some(CryptoBoxData {
data: Arc::new(unpadded.to_vec()),
})),
Err(_) => Ok(None),
}
} | conditional_block |
crypto_box.rs | use crate::internal::rayon::rayon_exec;
use crate::internal::x25519;
use block_padding::Padding;
use crypto_box as lib_crypto_box;
use std::sync::Arc;
/// Length of the crypto box aead nonce.
/// Ideally this would be exposed from upstream but I didn't see a good way to get at it directly.
pub const NONCE_BYTES: usize = 24;
/// The size of blocks to pad encrypted data to.
/// We have no idea how big incoming data is, but probably it is generally smallish.
/// Devs can always do their own padding on top of this, but we want some safety for unpadded data.
/// Libsodium optionally supports ISO 7816-4 padding algorithm.
/// @see https://doc.libsodium.org/padding#algorithm
pub const BLOCK_PADDING_SIZE: usize = 32;
/// The delimiter for padding as per ISO 7816-4.
pub const BLOCK_PADDING_DELIMITER: u8 = 0x80;
/// Newtype for the nonce for safety.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxNonce([u8; NONCE_BYTES]);
impl CryptoBoxNonce {
async fn new_random() -> Self {
rayon_exec(move || {
let mut rng = rand::thread_rng();
let mut bytes = [0; NONCE_BYTES];
// We rely on the lib_crypto_box nonce length being the same as what we expect.
// Should be a reasonably safe bet as 24 bytes is dictated by the crypto_box algorithm.
bytes.copy_from_slice(
lib_crypto_box::generate_nonce(&mut rng).as_slice(),
);
Self(bytes)
})
.await
}
}
impl AsRef<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8; NONCE_BYTES] {
&self.0
}
}
impl AsRef<[u8]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl From<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn from(array: [u8; NONCE_BYTES]) -> Self {
Self(array)
}
}
impl std::convert::TryFrom<&[u8]> for CryptoBoxNonce {
type Error = crate::error::LairError;
fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
if slice.len() == NONCE_BYTES {
let mut inner = [0; NONCE_BYTES];
inner.copy_from_slice(slice);
Ok(Self(inner))
} else {
Err(crate::error::LairError::CryptoBoxNonceLength)
}
}
}
impl CryptoBoxNonce {
/// Always NONCE_BYTES.
pub fn len(&self) -> usize {
NONCE_BYTES
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
/// "Additional associated data" as per the aead rust crate Payload.
/// May be empty. Must be valid if present.
pub struct CryptoBoxAad(Vec<u8>);
/// The nonce and encrypted data together.
/// @todo include additional associated data?
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxEncryptedData {
/// The nonce generated during encryption.
/// We never allow nonce to be set externally so we need to return it.
pub nonce: CryptoBoxNonce,
/// The encrypted version of our input data.
#[allow(clippy::rc_buffer)]
pub encrypted_data: Arc<Vec<u8>>,
}
/// Data to be encrypted.
/// Not associated with a nonce because we enforce random nonces.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxData {
/// Data to be encrypted.
#[allow(clippy::rc_buffer)]
pub data: Arc<Vec<u8>>,
}
impl AsRef<[u8]> for CryptoBoxData {
fn as_ref(&self) -> &[u8] {
self.data.as_ref()
}
}
impl CryptoBoxData {
/// Length of newtype is length of inner.
pub fn len(&self) -> usize {
AsRef::<[u8]>::as_ref(self).len()
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl From<Vec<u8>> for CryptoBoxData {
fn from(v: Vec<u8>) -> Self {
Self { data: Arc::new(v) }
}
}
/// @todo all of this can be opened up to be more flexible over time.
/// Eventually all possible input such as nonces and associated data should be settable by the
/// external interface.
/// In the short term everyone is getting their heads around the 80/20 usage patterns that are as
/// safe as we can possibly make them to avoid subtleties that lead to nonce or key re-use etc.
///
/// Wrapper around crypto_box from whatever lib we use.
/// No BYO nonces. Nonces always random and returned as part of `CryptoBoxEncryptedData`.
/// No BYO algorithms (cipher agility). Algorithm always X25519XSalsa20Poly1305.
/// Currently no additional associated data but DNA space may be included in the future.
/// The sender's private key encrypts _for_ the recipient's pubkey.
///
/// FYI allowing nonces could be dangerous as it's exposed as a general purpose authenticated
/// encryption mechanism (or will be) via. crypto_box from libsodium.
/// The main thing is that if a secret/nonce combination is _ever_ used more than once it
/// completely breaks encryption.
//
/// Example ways a nonce could accidentally be reused:
/// - If two DNAs are the same or similar (e.g. cloned DNAs) then they will have the same
/// nonce generation logic, so may create collisions when run in parallel.
/// - Collision of initialization vectors in a key exchange/crypto session.
/// - Use of a counter based nonce in a way that isn't 100% reliably incrementing.
///
/// Example ways a secret could accidentally be reused:
/// - If two agents both commit their pubkeys then share them with each other, then the same
/// shared key will be 'negotiated' by x25519 ECDH every time it is called.
/// - If a pubkey is used across two different DNAs the secrets will collide at the lair
/// and the DNAs won't have a way to co-ordinate or detect this.
///
/// E.g. Ring is very wary of secret key re-use e.g. it makes explicit the use-case where an
/// ephemeral (single use) key is generated to establish an ephemeral (single use) shared
/// key. Our use-case is the libsodium `crypto_box` function that uses an x25519 keypair to
/// perform authenticated encryption, so it makes more sense for us to be storing our
/// private keys for later use BUT see above for the dangers of key re-use that the app dev
/// really needs to be wary of.
///
/// @see https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
pub async fn crypto_box(
sender: x25519::X25519PrivKey,
recipient: x25519::X25519PubKey,
data: Arc<CryptoBoxData>,
) -> crate::error::LairResult<CryptoBoxEncryptedData> {
let nonce = CryptoBoxNonce::new_random().await;
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let sender_box =
lib_crypto_box::SalsaBox::new(recipient.as_ref(), sender.as_ref());
// It's actually easier and clearer to directly pad the vector than use the block_padding
// crate, as that is optimised for blocks.
let mut to_encrypt = data.data.to_vec();
let padding_delimiter = vec![BLOCK_PADDING_DELIMITER];
let padding = vec![
0x0; | ];
to_encrypt.extend(padding_delimiter);
to_encrypt.extend(padding);
let encrypted_data = Arc::new(sender_box.encrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&nonce).into(),
to_encrypt.as_slice(),
)?);
// @todo do we want associated data to enforce the originating DHT space?
// https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
Ok(CryptoBoxEncryptedData {
nonce,
encrypted_data,
})
})
.await
}
/// Wrapper around crypto_box_open from whatever lib we use.
/// Exact inverse of `crypto_box_open` so nonce must be provided in `CryptoBoxEncryptedData`.
/// The recipient's private key encrypts _from_ the sender's pubkey.
pub async fn crypto_box_open(
recipient: x25519::X25519PrivKey,
sender: x25519::X25519PubKey,
encrypted_data: Arc<CryptoBoxEncryptedData>,
) -> crate::error::LairResult<Option<CryptoBoxData>> {
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let recipient_box =
lib_crypto_box::SalsaBox::new(sender.as_ref(), recipient.as_ref());
match recipient_box.decrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&encrypted_data.nonce).into(),
encrypted_data.encrypted_data.as_slice(),
) {
Ok(decrypted_data) => {
match block_padding::Iso7816::unpad(&decrypted_data) {
// @todo do we want associated data to enforce the originating DHT space?
Ok(unpadded) => Ok(Some(CryptoBoxData {
data: Arc::new(unpadded.to_vec()),
})),
Err(_) => Ok(None),
}
}
Err(_) => Ok(None),
}
})
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test(flavor = "multi_thread")]
async fn it_can_encrypt_and_decrypt() {
for input in [
// Empty vec.
vec![],
// Small vec.
vec![0],
vec![0, 1, 2],
vec![0, 1, 2, 3],
// Vec ending in padding delimiter.
vec![0x80],
vec![0, 0x80],
vec![0x80; BLOCK_PADDING_SIZE - 1],
vec![0x80; BLOCK_PADDING_SIZE],
vec![0x80; BLOCK_PADDING_SIZE + 1],
// Larger vec.
vec![0; BLOCK_PADDING_SIZE - 1],
vec![0; BLOCK_PADDING_SIZE],
vec![0; BLOCK_PADDING_SIZE + 1],
vec![0; BLOCK_PADDING_SIZE * 2 - 1],
vec![0; BLOCK_PADDING_SIZE * 2],
vec![0; BLOCK_PADDING_SIZE * 2 + 1],
]
.iter()
{
// Fresh keys.
let alice =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let bob =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let data = CryptoBoxData {
data: Arc::new(input.to_vec()),
};
// from alice to bob.
let encrypted_data = super::crypto_box(
alice.priv_key,
bob.pub_key,
Arc::new(data.clone()),
)
.await
.unwrap();
// The length excluding the 16 byte overhead should always be a multiple of 32 as this
// is our padding.
assert_eq!((encrypted_data.encrypted_data.len() - 16) % 32, 0);
let decrypted_data = super::crypto_box_open(
bob.priv_key,
alice.pub_key,
Arc::new(encrypted_data),
)
.await
.unwrap();
// If we can decrypt we managed to pad and unpad as well as encrypt and decrypt.
assert_eq!(&decrypted_data, &Some(data));
}
}
} | BLOCK_PADDING_SIZE
- (data.data.len() + 1) % BLOCK_PADDING_SIZE | random_line_split |
crypto_box.rs | use crate::internal::rayon::rayon_exec;
use crate::internal::x25519;
use block_padding::Padding;
use crypto_box as lib_crypto_box;
use std::sync::Arc;
/// Length of the crypto box aead nonce.
/// Ideally this would be exposed from upstream but I didn't see a good way to get at it directly.
pub const NONCE_BYTES: usize = 24;
/// The size of blocks to pad encrypted data to.
/// We have no idea how big incoming data is, but probably it is generally smallish.
/// Devs can always do their own padding on top of this, but we want some safety for unpadded data.
/// Libsodium optionally supports ISO 7816-4 padding algorithm.
/// @see https://doc.libsodium.org/padding#algorithm
pub const BLOCK_PADDING_SIZE: usize = 32;
/// The delimiter for padding as per ISO 7816-4.
pub const BLOCK_PADDING_DELIMITER: u8 = 0x80;
/// Newtype for the nonce for safety.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxNonce([u8; NONCE_BYTES]);
impl CryptoBoxNonce {
async fn new_random() -> Self {
rayon_exec(move || {
let mut rng = rand::thread_rng();
let mut bytes = [0; NONCE_BYTES];
// We rely on the lib_crypto_box nonce length being the same as what we expect.
// Should be a reasonably safe bet as 24 bytes is dictated by the crypto_box algorithm.
bytes.copy_from_slice(
lib_crypto_box::generate_nonce(&mut rng).as_slice(),
);
Self(bytes)
})
.await
}
}
impl AsRef<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8; NONCE_BYTES] {
&self.0
}
}
impl AsRef<[u8]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl From<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn from(array: [u8; NONCE_BYTES]) -> Self {
Self(array)
}
}
impl std::convert::TryFrom<&[u8]> for CryptoBoxNonce {
type Error = crate::error::LairError;
fn | (slice: &[u8]) -> Result<Self, Self::Error> {
if slice.len() == NONCE_BYTES {
let mut inner = [0; NONCE_BYTES];
inner.copy_from_slice(slice);
Ok(Self(inner))
} else {
Err(crate::error::LairError::CryptoBoxNonceLength)
}
}
}
impl CryptoBoxNonce {
/// Always NONCE_BYTES.
pub fn len(&self) -> usize {
NONCE_BYTES
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
/// "Additional associated data" as per the aead rust crate Payload.
/// May be empty. Must be valid if present.
pub struct CryptoBoxAad(Vec<u8>);
/// The nonce and encrypted data together.
/// @todo include additional associated data?
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxEncryptedData {
/// The nonce generated during encryption.
/// We never allow nonce to be set externally so we need to return it.
pub nonce: CryptoBoxNonce,
/// The encrypted version of our input data.
#[allow(clippy::rc_buffer)]
pub encrypted_data: Arc<Vec<u8>>,
}
/// Data to be encrypted.
/// Not associated with a nonce because we enforce random nonces.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxData {
/// Data to be encrypted.
#[allow(clippy::rc_buffer)]
pub data: Arc<Vec<u8>>,
}
impl AsRef<[u8]> for CryptoBoxData {
fn as_ref(&self) -> &[u8] {
self.data.as_ref()
}
}
impl CryptoBoxData {
/// Length of newtype is length of inner.
pub fn len(&self) -> usize {
AsRef::<[u8]>::as_ref(self).len()
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl From<Vec<u8>> for CryptoBoxData {
fn from(v: Vec<u8>) -> Self {
Self { data: Arc::new(v) }
}
}
/// @todo all of this can be opened up to be more flexible over time.
/// Eventually all possible input such as nonces and associated data should be settable by the
/// external interface.
/// In the short term everyone is getting their heads around the 80/20 usage patterns that are as
/// safe as we can possibly make them to avoid subtleties that lead to nonce or key re-use etc.
///
/// Wrapper around crypto_box from whatever lib we use.
/// No BYO nonces. Nonces always random and returned as part of `CryptoBoxEncryptedData`.
/// No BYO algorithms (cipher agility). Algorithm always X25519XSalsa20Poly1305.
/// Currently no additional associated data but DNA space may be included in the future.
/// The sender's private key encrypts _for_ the recipient's pubkey.
///
/// FYI allowing nonces could be dangerous as it's exposed as a general purpose authenticated
/// encryption mechanism (or will be) via. crypto_box from libsodium.
/// The main thing is that if a secret/nonce combination is _ever_ used more than once it
/// completely breaks encryption.
//
/// Example ways a nonce could accidentally be reused:
/// - If two DNAs are the same or similar (e.g. cloned DNAs) then they will have the same
/// nonce generation logic, so may create collisions when run in parallel.
/// - Collision of initialization vectors in a key exchange/crypto session.
/// - Use of a counter based nonce in a way that isn't 100% reliably incrementing.
///
/// Example ways a secret could accidentally be reused:
/// - If two agents both commit their pubkeys then share them with each other, then the same
/// shared key will be 'negotiated' by x25519 ECDH every time it is called.
/// - If a pubkey is used across two different DNAs the secrets will collide at the lair
/// and the DNAs won't have a way to co-ordinate or detect this.
///
/// E.g. Ring is very wary of secret key re-use e.g. it makes explicit the use-case where an
/// ephemeral (single use) key is generated to establish an ephemeral (single use) shared
/// key. Our use-case is the libsodium `crypto_box` function that uses an x25519 keypair to
/// perform authenticated encryption, so it makes more sense for us to be storing our
/// private keys for later use BUT see above for the dangers of key re-use that the app dev
/// really needs to be wary of.
///
/// @see https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
pub async fn crypto_box(
sender: x25519::X25519PrivKey,
recipient: x25519::X25519PubKey,
data: Arc<CryptoBoxData>,
) -> crate::error::LairResult<CryptoBoxEncryptedData> {
let nonce = CryptoBoxNonce::new_random().await;
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let sender_box =
lib_crypto_box::SalsaBox::new(recipient.as_ref(), sender.as_ref());
// It's actually easier and clearer to directly pad the vector than use the block_padding
// crate, as that is optimised for blocks.
let mut to_encrypt = data.data.to_vec();
let padding_delimiter = vec![BLOCK_PADDING_DELIMITER];
let padding = vec![
0x0;
BLOCK_PADDING_SIZE
- (data.data.len() + 1) % BLOCK_PADDING_SIZE
];
to_encrypt.extend(padding_delimiter);
to_encrypt.extend(padding);
let encrypted_data = Arc::new(sender_box.encrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&nonce).into(),
to_encrypt.as_slice(),
)?);
// @todo do we want associated data to enforce the originating DHT space?
// https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
Ok(CryptoBoxEncryptedData {
nonce,
encrypted_data,
})
})
.await
}
/// Wrapper around crypto_box_open from whatever lib we use.
/// Exact inverse of `crypto_box_open` so nonce must be provided in `CryptoBoxEncryptedData`.
/// The recipient's private key encrypts _from_ the sender's pubkey.
pub async fn crypto_box_open(
recipient: x25519::X25519PrivKey,
sender: x25519::X25519PubKey,
encrypted_data: Arc<CryptoBoxEncryptedData>,
) -> crate::error::LairResult<Option<CryptoBoxData>> {
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let recipient_box =
lib_crypto_box::SalsaBox::new(sender.as_ref(), recipient.as_ref());
match recipient_box.decrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&encrypted_data.nonce).into(),
encrypted_data.encrypted_data.as_slice(),
) {
Ok(decrypted_data) => {
match block_padding::Iso7816::unpad(&decrypted_data) {
// @todo do we want associated data to enforce the originating DHT space?
Ok(unpadded) => Ok(Some(CryptoBoxData {
data: Arc::new(unpadded.to_vec()),
})),
Err(_) => Ok(None),
}
}
Err(_) => Ok(None),
}
})
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test(flavor = "multi_thread")]
async fn it_can_encrypt_and_decrypt() {
for input in [
// Empty vec.
vec![],
// Small vec.
vec![0],
vec![0, 1, 2],
vec![0, 1, 2, 3],
// Vec ending in padding delimiter.
vec![0x80],
vec![0, 0x80],
vec![0x80; BLOCK_PADDING_SIZE - 1],
vec![0x80; BLOCK_PADDING_SIZE],
vec![0x80; BLOCK_PADDING_SIZE + 1],
// Larger vec.
vec![0; BLOCK_PADDING_SIZE - 1],
vec![0; BLOCK_PADDING_SIZE],
vec![0; BLOCK_PADDING_SIZE + 1],
vec![0; BLOCK_PADDING_SIZE * 2 - 1],
vec![0; BLOCK_PADDING_SIZE * 2],
vec![0; BLOCK_PADDING_SIZE * 2 + 1],
]
.iter()
{
// Fresh keys.
let alice =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let bob =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let data = CryptoBoxData {
data: Arc::new(input.to_vec()),
};
// from alice to bob.
let encrypted_data = super::crypto_box(
alice.priv_key,
bob.pub_key,
Arc::new(data.clone()),
)
.await
.unwrap();
// The length excluding the 16 byte overhead should always be a multiple of 32 as this
// is our padding.
assert_eq!((encrypted_data.encrypted_data.len() - 16) % 32, 0);
let decrypted_data = super::crypto_box_open(
bob.priv_key,
alice.pub_key,
Arc::new(encrypted_data),
)
.await
.unwrap();
// If we can decrypt we managed to pad and unpad as well as encrypt and decrypt.
assert_eq!(&decrypted_data, &Some(data));
}
}
}
| try_from | identifier_name |
crypto_box.rs | use crate::internal::rayon::rayon_exec;
use crate::internal::x25519;
use block_padding::Padding;
use crypto_box as lib_crypto_box;
use std::sync::Arc;
/// Length of the crypto box aead nonce.
/// Ideally this would be exposed from upstream but I didn't see a good way to get at it directly.
pub const NONCE_BYTES: usize = 24;
/// The size of blocks to pad encrypted data to.
/// We have no idea how big incoming data is, but probably it is generally smallish.
/// Devs can always do their own padding on top of this, but we want some safety for unpadded data.
/// Libsodium optionally supports ISO 7816-4 padding algorithm.
/// @see https://doc.libsodium.org/padding#algorithm
pub const BLOCK_PADDING_SIZE: usize = 32;
/// The delimiter for padding as per ISO 7816-4.
pub const BLOCK_PADDING_DELIMITER: u8 = 0x80;
/// Newtype for the nonce for safety.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxNonce([u8; NONCE_BYTES]);
impl CryptoBoxNonce {
async fn new_random() -> Self {
rayon_exec(move || {
let mut rng = rand::thread_rng();
let mut bytes = [0; NONCE_BYTES];
// We rely on the lib_crypto_box nonce length being the same as what we expect.
// Should be a reasonably safe bet as 24 bytes is dictated by the crypto_box algorithm.
bytes.copy_from_slice(
lib_crypto_box::generate_nonce(&mut rng).as_slice(),
);
Self(bytes)
})
.await
}
}
impl AsRef<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8; NONCE_BYTES] {
&self.0
}
}
impl AsRef<[u8]> for CryptoBoxNonce {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl From<[u8; NONCE_BYTES]> for CryptoBoxNonce {
fn from(array: [u8; NONCE_BYTES]) -> Self {
Self(array)
}
}
impl std::convert::TryFrom<&[u8]> for CryptoBoxNonce {
type Error = crate::error::LairError;
fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
if slice.len() == NONCE_BYTES {
let mut inner = [0; NONCE_BYTES];
inner.copy_from_slice(slice);
Ok(Self(inner))
} else {
Err(crate::error::LairError::CryptoBoxNonceLength)
}
}
}
impl CryptoBoxNonce {
/// Always NONCE_BYTES.
pub fn len(&self) -> usize {
NONCE_BYTES
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
/// "Additional associated data" as per the aead rust crate Payload.
/// May be empty. Must be valid if present.
pub struct CryptoBoxAad(Vec<u8>);
/// The nonce and encrypted data together.
/// @todo include additional associated data?
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxEncryptedData {
/// The nonce generated during encryption.
/// We never allow nonce to be set externally so we need to return it.
pub nonce: CryptoBoxNonce,
/// The encrypted version of our input data.
#[allow(clippy::rc_buffer)]
pub encrypted_data: Arc<Vec<u8>>,
}
/// Data to be encrypted.
/// Not associated with a nonce because we enforce random nonces.
#[derive(Debug, PartialEq, Clone)]
pub struct CryptoBoxData {
/// Data to be encrypted.
#[allow(clippy::rc_buffer)]
pub data: Arc<Vec<u8>>,
}
impl AsRef<[u8]> for CryptoBoxData {
fn as_ref(&self) -> &[u8] |
}
impl CryptoBoxData {
/// Length of newtype is length of inner.
pub fn len(&self) -> usize {
AsRef::<[u8]>::as_ref(self).len()
}
/// For clippy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl From<Vec<u8>> for CryptoBoxData {
fn from(v: Vec<u8>) -> Self {
Self { data: Arc::new(v) }
}
}
/// @todo all of this can be opened up to be more flexible over time.
/// Eventually all possible input such as nonces and associated data should be settable by the
/// external interface.
/// In the short term everyone is getting their heads around the 80/20 usage patterns that are as
/// safe as we can possibly make them to avoid subtleties that lead to nonce or key re-use etc.
///
/// Wrapper around crypto_box from whatever lib we use.
/// No BYO nonces. Nonces always random and returned as part of `CryptoBoxEncryptedData`.
/// No BYO algorithms (cipher agility). Algorithm always X25519XSalsa20Poly1305.
/// Currently no additional associated data but DNA space may be included in the future.
/// The sender's private key encrypts _for_ the recipient's pubkey.
///
/// FYI allowing nonces could be dangerous as it's exposed as a general purpose authenticated
/// encryption mechanism (or will be) via. crypto_box from libsodium.
/// The main thing is that if a secret/nonce combination is _ever_ used more than once it
/// completely breaks encryption.
//
/// Example ways a nonce could accidentally be reused:
/// - If two DNAs are the same or similar (e.g. cloned DNAs) then they will have the same
/// nonce generation logic, so may create collisions when run in parallel.
/// - Collision of initialization vectors in a key exchange/crypto session.
/// - Use of a counter based nonce in a way that isn't 100% reliably incrementing.
///
/// Example ways a secret could accidentally be reused:
/// - If two agents both commit their pubkeys then share them with each other, then the same
/// shared key will be 'negotiated' by x25519 ECDH every time it is called.
/// - If a pubkey is used across two different DNAs the secrets will collide at the lair
/// and the DNAs won't have a way to co-ordinate or detect this.
///
/// E.g. Ring is very wary of secret key re-use e.g. it makes explicit the use-case where an
/// ephemeral (single use) key is generated to establish an ephemeral (single use) shared
/// key. Our use-case is the libsodium `crypto_box` function that uses an x25519 keypair to
/// perform authenticated encryption, so it makes more sense for us to be storing our
/// private keys for later use BUT see above for the dangers of key re-use that the app dev
/// really needs to be wary of.
///
/// @see https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
pub async fn crypto_box(
sender: x25519::X25519PrivKey,
recipient: x25519::X25519PubKey,
data: Arc<CryptoBoxData>,
) -> crate::error::LairResult<CryptoBoxEncryptedData> {
let nonce = CryptoBoxNonce::new_random().await;
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let sender_box =
lib_crypto_box::SalsaBox::new(recipient.as_ref(), sender.as_ref());
// It's actually easier and clearer to directly pad the vector than use the block_padding
// crate, as that is optimised for blocks.
let mut to_encrypt = data.data.to_vec();
let padding_delimiter = vec![BLOCK_PADDING_DELIMITER];
let padding = vec![
0x0;
BLOCK_PADDING_SIZE
- (data.data.len() + 1) % BLOCK_PADDING_SIZE
];
to_encrypt.extend(padding_delimiter);
to_encrypt.extend(padding);
let encrypted_data = Arc::new(sender_box.encrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&nonce).into(),
to_encrypt.as_slice(),
)?);
// @todo do we want associated data to enforce the originating DHT space?
// https://eprint.iacr.org/2019/519.pdf for 'context separable interfaces'
Ok(CryptoBoxEncryptedData {
nonce,
encrypted_data,
})
})
.await
}
/// Wrapper around crypto_box_open from whatever lib we use.
/// Exact inverse of `crypto_box_open` so nonce must be provided in `CryptoBoxEncryptedData`.
/// The recipient's private key encrypts _from_ the sender's pubkey.
pub async fn crypto_box_open(
recipient: x25519::X25519PrivKey,
sender: x25519::X25519PubKey,
encrypted_data: Arc<CryptoBoxEncryptedData>,
) -> crate::error::LairResult<Option<CryptoBoxData>> {
rayon_exec(move || {
use lib_crypto_box::aead::Aead;
let recipient_box =
lib_crypto_box::SalsaBox::new(sender.as_ref(), recipient.as_ref());
match recipient_box.decrypt(
AsRef::<[u8; NONCE_BYTES]>::as_ref(&encrypted_data.nonce).into(),
encrypted_data.encrypted_data.as_slice(),
) {
Ok(decrypted_data) => {
match block_padding::Iso7816::unpad(&decrypted_data) {
// @todo do we want associated data to enforce the originating DHT space?
Ok(unpadded) => Ok(Some(CryptoBoxData {
data: Arc::new(unpadded.to_vec()),
})),
Err(_) => Ok(None),
}
}
Err(_) => Ok(None),
}
})
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test(flavor = "multi_thread")]
async fn it_can_encrypt_and_decrypt() {
for input in [
// Empty vec.
vec![],
// Small vec.
vec![0],
vec![0, 1, 2],
vec![0, 1, 2, 3],
// Vec ending in padding delimiter.
vec![0x80],
vec![0, 0x80],
vec![0x80; BLOCK_PADDING_SIZE - 1],
vec![0x80; BLOCK_PADDING_SIZE],
vec![0x80; BLOCK_PADDING_SIZE + 1],
// Larger vec.
vec![0; BLOCK_PADDING_SIZE - 1],
vec![0; BLOCK_PADDING_SIZE],
vec![0; BLOCK_PADDING_SIZE + 1],
vec![0; BLOCK_PADDING_SIZE * 2 - 1],
vec![0; BLOCK_PADDING_SIZE * 2],
vec![0; BLOCK_PADDING_SIZE * 2 + 1],
]
.iter()
{
// Fresh keys.
let alice =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let bob =
crate::internal::x25519::x25519_keypair_new_from_entropy()
.await
.unwrap();
let data = CryptoBoxData {
data: Arc::new(input.to_vec()),
};
// from alice to bob.
let encrypted_data = super::crypto_box(
alice.priv_key,
bob.pub_key,
Arc::new(data.clone()),
)
.await
.unwrap();
// The length excluding the 16 byte overhead should always be a multiple of 32 as this
// is our padding.
assert_eq!((encrypted_data.encrypted_data.len() - 16) % 32, 0);
let decrypted_data = super::crypto_box_open(
bob.priv_key,
alice.pub_key,
Arc::new(encrypted_data),
)
.await
.unwrap();
// If we can decrypt we managed to pad and unpad as well as encrypt and decrypt.
assert_eq!(&decrypted_data, &Some(data));
}
}
}
| {
self.data.as_ref()
} | identifier_body |
scoped_signal_handler.rs | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provides a struct for registering signal handlers that get cleared on drop.
use std::convert::TryFrom;
use std::fmt;
use std::io::{Cursor, Write};
use std::panic::catch_unwind;
use std::result;
use libc::{c_int, c_void, STDERR_FILENO};
use remain::sorted;
use thiserror::Error;
use crate::errno;
use crate::signal::{
clear_signal_handler, has_default_signal_handler, register_signal_handler, wait_for_signal,
Signal,
};
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
/// Already waiting for interrupt.
#[error("already waiting for interrupt.")]
AlreadyWaiting,
/// Signal already has a handler.
#[error("signal handler already set for {0:?}")]
HandlerAlreadySet(Signal),
/// Failed to check if signal has the default signal handler.
#[error("failed to check the signal handler for {0:?}: {1}")]
HasDefaultSignalHandler(Signal, errno::Error),
/// Failed to register a signal handler.
#[error("failed to register a signal handler for {0:?}: {1}")]
RegisterSignalHandler(Signal, errno::Error),
/// Sigaction failed.
#[error("sigaction failed for {0:?}: {1}")]
Sigaction(Signal, errno::Error),
/// Failed to wait for signal.
#[error("wait_for_signal failed: {0}")]
WaitForSignal(errno::Error),
}
pub type Result<T> = result::Result<T, Error>;
/// The interface used by Scoped Signal handler.
///
/// # Safety
/// The implementation of handle_signal needs to be async signal-safe.
///
/// NOTE: panics are caught when possible because a panic inside ffi is undefined behavior.
pub unsafe trait SignalHandler {
/// A function that is called to handle the passed signal.
fn handle_signal(signal: Signal);
}
/// Wrap the handler with an extern "C" function.
extern "C" fn call_handler<H: SignalHandler>(signum: c_int) |
/// Represents a signal handler that is registered with a set of signals that unregistered when the
/// struct goes out of scope. Prefer a signalfd based solution before using this.
pub struct ScopedSignalHandler {
signals: Vec<Signal>,
}
impl ScopedSignalHandler {
/// Attempts to register `handler` with the provided `signals`. It will fail if there is already
/// an existing handler on any of `signals`.
///
/// # Safety
/// This is safe if H::handle_signal is async-signal safe.
pub fn new<H: SignalHandler>(signals: &[Signal]) -> Result<Self> {
let mut scoped_handler = ScopedSignalHandler {
signals: Vec::with_capacity(signals.len()),
};
for &signal in signals {
if !has_default_signal_handler((signal).into())
.map_err(|err| Error::HasDefaultSignalHandler(signal, err))?
{
return Err(Error::HandlerAlreadySet(signal));
}
// Requires an async-safe callback.
unsafe {
register_signal_handler((signal).into(), call_handler::<H>)
.map_err(|err| Error::RegisterSignalHandler(signal, err))?
};
scoped_handler.signals.push(signal);
}
Ok(scoped_handler)
}
}
/// Clears the signal handler for any of the associated signals.
impl Drop for ScopedSignalHandler {
fn drop(&mut self) {
for signal in &self.signals {
if let Err(err) = clear_signal_handler((*signal).into()) {
eprintln!("Error: failed to clear signal handler: {:?}", err);
}
}
}
}
/// A signal handler that does nothing.
///
/// This is useful in cases where wait_for_signal is used since it will never trigger if the signal
/// is blocked and the default handler may have undesired effects like terminating the process.
pub struct EmptySignalHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for EmptySignalHandler {
fn handle_signal(_: Signal) {}
}
/// Blocks until SIGINT is received, which often happens because Ctrl-C was pressed in an
/// interactive terminal.
///
/// Note: if you are using a multi-threaded application you need to block SIGINT on all other
/// threads or they may receive the signal instead of the desired thread.
pub fn wait_for_interrupt() -> Result<()> {
// Register a signal handler if there is not one already so the thread is not killed.
let ret = ScopedSignalHandler::new::<EmptySignalHandler>(&[Signal::Interrupt]);
if !matches!(&ret, Ok(_) | Err(Error::HandlerAlreadySet(_))) {
ret?;
}
match wait_for_signal(&[Signal::Interrupt.into()], None) {
Ok(_) => Ok(()),
Err(err) => Err(Error::WaitForSignal(err)),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::mem::zeroed;
use std::ptr::{null, null_mut};
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, Once};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use libc::sigaction;
use crate::{gettid, kill, Pid};
const TEST_SIGNAL: Signal = Signal::User1;
const TEST_SIGNALS: &[Signal] = &[Signal::User1, Signal::User2];
static TEST_SIGNAL_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Only allows one test case to execute at a time.
fn get_mutex() -> MutexGuard<'static, ()> {
static INIT: Once = Once::new();
static mut VAL: Option<Arc<Mutex<()>>> = None;
INIT.call_once(|| {
let val = Some(Arc::new(Mutex::new(())));
// Safe because the mutation is protected by the Once.
unsafe { VAL = val }
});
// Safe mutation only happens in the Once.
unsafe { VAL.as_ref() }.unwrap().lock().unwrap()
}
fn reset_counter() {
TEST_SIGNAL_COUNTER.swap(0, Ordering::SeqCst);
}
fn get_sigaction(signal: Signal) -> Result<sigaction> {
// Safe because sigaction is owned and expected to be initialized ot zeros.
let mut sigact: sigaction = unsafe { zeroed() };
if unsafe { sigaction(signal.into(), null(), &mut sigact) } < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// This is only safe if the signal handler set in sigaction is safe.
unsafe fn restore_sigaction(signal: Signal, sigact: sigaction) -> Result<sigaction> {
if sigaction(signal.into(), &sigact, null_mut()) < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// Safe if the signal handler for Signal::User1 is safe.
unsafe fn send_test_signal() {
kill(gettid(), Signal::User1.into()).unwrap()
}
macro_rules! assert_counter_eq {
($compare_to:expr) => {{
let expected: usize = $compare_to;
let got: usize = TEST_SIGNAL_COUNTER.load(Ordering::SeqCst);
if got != expected {
panic!(
"wrong signal counter value: got {}; expected {}",
got, expected
);
}
}};
}
struct TestHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for TestHandler {
fn handle_signal(signal: Signal) {
if TEST_SIGNAL == signal {
TEST_SIGNAL_COUNTER.fetch_add(1, Ordering::SeqCst);
}
}
}
#[test]
fn scopedsignalhandler_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because test_handler is safe.
unsafe { send_test_signal() };
// Give the handler time to run in case it is on a different thread.
for _ in 1..40 {
if TEST_SIGNAL_COUNTER.load(Ordering::SeqCst) > 0 {
break;
}
sleep(Duration::from_millis(250));
}
assert_counter_eq!(1);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
#[test]
fn scopedsignalhandler_handleralreadyset() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
assert!(matches!(
ScopedSignalHandler::new::<TestHandler>(TEST_SIGNALS),
Err(Error::HandlerAlreadySet(Signal::User1))
));
assert_counter_eq!(0);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
/// Stores the thread used by WaitForInterruptHandler.
static WAIT_FOR_INTERRUPT_THREAD_ID: AtomicI32 = AtomicI32::new(0);
/// Forwards SIGINT to the appropriate thread.
struct WaitForInterruptHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for WaitForInterruptHandler {
fn handle_signal(_: Signal) {
let tid = WAIT_FOR_INTERRUPT_THREAD_ID.load(Ordering::SeqCst);
// If the thread ID is set and executed on the wrong thread, forward the signal.
if tid != 0 && gettid() != tid {
// Safe because the handler is safe and the target thread id is expecting the signal.
unsafe { kill(tid, Signal::Interrupt.into()) }.unwrap();
}
}
}
/// Query /proc/${tid}/status for its State and check if it is either S (sleeping) or in
/// D (disk sleep).
fn thread_is_sleeping(tid: Pid) -> result::Result<bool, errno::Error> {
const PREFIX: &str = "State:";
let mut status_reader = BufReader::new(File::open(format!("/proc/{}/status", tid))?);
let mut line = String::new();
loop {
let count = status_reader.read_line(&mut line)?;
if count == 0 {
return Err(errno::Error::new(libc::EIO));
}
if let Some(stripped) = line.strip_prefix(PREFIX) {
return Ok(matches!(
stripped.trim_start().chars().next(),
Some('S') | Some('D')
));
}
line.clear();
}
}
/// Wait for a process to block either in a sleeping or disk sleep state.
fn wait_for_thread_to_sleep(tid: Pid, timeout: Duration) -> result::Result<(), errno::Error> {
let start = Instant::now();
loop {
if thread_is_sleeping(tid)? {
return Ok(());
}
if start.elapsed() > timeout {
return Err(errno::Error::new(libc::EAGAIN));
}
sleep(Duration::from_millis(50));
}
}
#[test]
fn waitforinterrupt_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
let to_restore = get_sigaction(Signal::Interrupt).unwrap();
clear_signal_handler(Signal::Interrupt.into()).unwrap();
// Safe because TestHandler is async-signal safe.
let handler =
ScopedSignalHandler::new::<WaitForInterruptHandler>(&[Signal::Interrupt]).unwrap();
let tid = gettid();
WAIT_FOR_INTERRUPT_THREAD_ID.store(tid, Ordering::SeqCst);
let join_handle = spawn(move || -> result::Result<(), errno::Error> {
// Wait unitl the thread is ready to receive the signal.
wait_for_thread_to_sleep(tid, Duration::from_secs(10)).unwrap();
// Safe because the SIGINT handler is safe.
unsafe { kill(tid, Signal::Interrupt.into()) }
});
let wait_ret = wait_for_interrupt();
let join_ret = join_handle.join();
drop(handler);
// Safe because we are restoring the previous SIGINT handler.
unsafe { restore_sigaction(Signal::Interrupt, to_restore) }.unwrap();
wait_ret.unwrap();
join_ret.unwrap().unwrap();
}
}
| {
// Make an effort to surface an error.
if catch_unwind(|| H::handle_signal(Signal::try_from(signum).unwrap())).is_err() {
// Note the following cannot be used:
// eprintln! - uses std::io which has locks that may be held.
// format! - uses the allocator which enforces mutual exclusion.
// Get the debug representation of signum.
let signal: Signal;
let signal_debug: &dyn fmt::Debug = match Signal::try_from(signum) {
Ok(s) => {
signal = s;
&signal as &dyn fmt::Debug
}
Err(_) => &signum as &dyn fmt::Debug,
};
// Buffer the output, so a single call to write can be used.
// The message accounts for 29 chars, that leaves 35 for the string representation of the
// signal which is more than enough.
let mut buffer = [0u8; 64];
let mut cursor = Cursor::new(buffer.as_mut());
if writeln!(cursor, "signal handler got error for: {:?}", signal_debug).is_ok() {
let len = cursor.position() as usize;
// Safe in the sense that buffer is owned and the length is checked. This may print in
// the middle of an existing write, but that is considered better than dropping the
// error.
unsafe {
libc::write(
STDERR_FILENO,
cursor.get_ref().as_ptr() as *const c_void,
len,
)
};
} else {
// This should never happen, but write an error message just in case.
const ERROR_DROPPED: &str = "Error dropped by signal handler.";
let bytes = ERROR_DROPPED.as_bytes();
unsafe { libc::write(STDERR_FILENO, bytes.as_ptr() as *const c_void, bytes.len()) };
}
}
} | identifier_body |
scoped_signal_handler.rs | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provides a struct for registering signal handlers that get cleared on drop.
use std::convert::TryFrom;
use std::fmt;
use std::io::{Cursor, Write};
use std::panic::catch_unwind;
use std::result;
use libc::{c_int, c_void, STDERR_FILENO};
use remain::sorted;
use thiserror::Error;
use crate::errno;
use crate::signal::{
clear_signal_handler, has_default_signal_handler, register_signal_handler, wait_for_signal,
Signal,
};
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
/// Already waiting for interrupt.
#[error("already waiting for interrupt.")]
AlreadyWaiting,
/// Signal already has a handler.
#[error("signal handler already set for {0:?}")]
HandlerAlreadySet(Signal),
/// Failed to check if signal has the default signal handler.
#[error("failed to check the signal handler for {0:?}: {1}")]
HasDefaultSignalHandler(Signal, errno::Error),
/// Failed to register a signal handler.
#[error("failed to register a signal handler for {0:?}: {1}")]
RegisterSignalHandler(Signal, errno::Error),
/// Sigaction failed.
#[error("sigaction failed for {0:?}: {1}")]
Sigaction(Signal, errno::Error),
/// Failed to wait for signal.
#[error("wait_for_signal failed: {0}")]
WaitForSignal(errno::Error),
}
pub type Result<T> = result::Result<T, Error>;
/// The interface used by Scoped Signal handler.
///
/// # Safety
/// The implementation of handle_signal needs to be async signal-safe.
///
/// NOTE: panics are caught when possible because a panic inside ffi is undefined behavior.
pub unsafe trait SignalHandler {
/// A function that is called to handle the passed signal.
fn handle_signal(signal: Signal);
}
/// Wrap the handler with an extern "C" function.
extern "C" fn call_handler<H: SignalHandler>(signum: c_int) {
// Make an effort to surface an error.
if catch_unwind(|| H::handle_signal(Signal::try_from(signum).unwrap())).is_err() {
// Note the following cannot be used:
// eprintln! - uses std::io which has locks that may be held.
// format! - uses the allocator which enforces mutual exclusion.
// Get the debug representation of signum.
let signal: Signal;
let signal_debug: &dyn fmt::Debug = match Signal::try_from(signum) {
Ok(s) => {
signal = s;
&signal as &dyn fmt::Debug
}
Err(_) => &signum as &dyn fmt::Debug,
};
// Buffer the output, so a single call to write can be used.
// The message accounts for 29 chars, that leaves 35 for the string representation of the
// signal which is more than enough.
let mut buffer = [0u8; 64];
let mut cursor = Cursor::new(buffer.as_mut());
if writeln!(cursor, "signal handler got error for: {:?}", signal_debug).is_ok() {
let len = cursor.position() as usize;
// Safe in the sense that buffer is owned and the length is checked. This may print in
// the middle of an existing write, but that is considered better than dropping the
// error.
unsafe {
libc::write(
STDERR_FILENO,
cursor.get_ref().as_ptr() as *const c_void,
len,
)
};
} else {
// This should never happen, but write an error message just in case.
const ERROR_DROPPED: &str = "Error dropped by signal handler.";
let bytes = ERROR_DROPPED.as_bytes();
unsafe { libc::write(STDERR_FILENO, bytes.as_ptr() as *const c_void, bytes.len()) };
}
}
}
/// Represents a signal handler that is registered with a set of signals that unregistered when the
/// struct goes out of scope. Prefer a signalfd based solution before using this.
pub struct ScopedSignalHandler {
signals: Vec<Signal>,
}
impl ScopedSignalHandler {
/// Attempts to register `handler` with the provided `signals`. It will fail if there is already
/// an existing handler on any of `signals`.
///
/// # Safety
/// This is safe if H::handle_signal is async-signal safe.
pub fn new<H: SignalHandler>(signals: &[Signal]) -> Result<Self> {
let mut scoped_handler = ScopedSignalHandler {
signals: Vec::with_capacity(signals.len()),
};
for &signal in signals {
if !has_default_signal_handler((signal).into())
.map_err(|err| Error::HasDefaultSignalHandler(signal, err))?
{
return Err(Error::HandlerAlreadySet(signal));
}
// Requires an async-safe callback.
unsafe {
register_signal_handler((signal).into(), call_handler::<H>)
.map_err(|err| Error::RegisterSignalHandler(signal, err))?
};
scoped_handler.signals.push(signal);
}
Ok(scoped_handler)
}
}
/// Clears the signal handler for any of the associated signals.
impl Drop for ScopedSignalHandler {
fn drop(&mut self) {
for signal in &self.signals {
if let Err(err) = clear_signal_handler((*signal).into()) {
eprintln!("Error: failed to clear signal handler: {:?}", err);
}
}
}
}
/// A signal handler that does nothing.
///
/// This is useful in cases where wait_for_signal is used since it will never trigger if the signal
/// is blocked and the default handler may have undesired effects like terminating the process.
pub struct EmptySignalHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for EmptySignalHandler {
fn handle_signal(_: Signal) {}
}
/// Blocks until SIGINT is received, which often happens because Ctrl-C was pressed in an
/// interactive terminal.
///
/// Note: if you are using a multi-threaded application you need to block SIGINT on all other
/// threads or they may receive the signal instead of the desired thread.
pub fn wait_for_interrupt() -> Result<()> {
// Register a signal handler if there is not one already so the thread is not killed.
let ret = ScopedSignalHandler::new::<EmptySignalHandler>(&[Signal::Interrupt]);
if !matches!(&ret, Ok(_) | Err(Error::HandlerAlreadySet(_))) {
ret?;
}
match wait_for_signal(&[Signal::Interrupt.into()], None) {
Ok(_) => Ok(()),
Err(err) => Err(Error::WaitForSignal(err)),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::mem::zeroed;
use std::ptr::{null, null_mut};
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, Once};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use libc::sigaction;
use crate::{gettid, kill, Pid};
const TEST_SIGNAL: Signal = Signal::User1;
const TEST_SIGNALS: &[Signal] = &[Signal::User1, Signal::User2];
static TEST_SIGNAL_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Only allows one test case to execute at a time.
fn get_mutex() -> MutexGuard<'static, ()> {
static INIT: Once = Once::new();
static mut VAL: Option<Arc<Mutex<()>>> = None;
INIT.call_once(|| {
let val = Some(Arc::new(Mutex::new(())));
// Safe because the mutation is protected by the Once.
unsafe { VAL = val }
});
// Safe mutation only happens in the Once.
unsafe { VAL.as_ref() }.unwrap().lock().unwrap()
}
fn reset_counter() {
TEST_SIGNAL_COUNTER.swap(0, Ordering::SeqCst);
}
fn get_sigaction(signal: Signal) -> Result<sigaction> {
// Safe because sigaction is owned and expected to be initialized ot zeros.
let mut sigact: sigaction = unsafe { zeroed() };
if unsafe { sigaction(signal.into(), null(), &mut sigact) } < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// This is only safe if the signal handler set in sigaction is safe.
unsafe fn restore_sigaction(signal: Signal, sigact: sigaction) -> Result<sigaction> {
if sigaction(signal.into(), &sigact, null_mut()) < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// Safe if the signal handler for Signal::User1 is safe.
unsafe fn send_test_signal() {
kill(gettid(), Signal::User1.into()).unwrap()
}
macro_rules! assert_counter_eq {
($compare_to:expr) => {{
let expected: usize = $compare_to;
let got: usize = TEST_SIGNAL_COUNTER.load(Ordering::SeqCst);
if got != expected {
panic!(
"wrong signal counter value: got {}; expected {}",
got, expected
);
}
}};
}
struct TestHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for TestHandler {
fn handle_signal(signal: Signal) {
if TEST_SIGNAL == signal {
TEST_SIGNAL_COUNTER.fetch_add(1, Ordering::SeqCst);
}
}
}
#[test]
fn scopedsignalhandler_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because test_handler is safe.
unsafe { send_test_signal() };
// Give the handler time to run in case it is on a different thread.
for _ in 1..40 {
if TEST_SIGNAL_COUNTER.load(Ordering::SeqCst) > 0 {
break;
}
sleep(Duration::from_millis(250));
}
assert_counter_eq!(1);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
#[test]
fn scopedsignalhandler_handleralreadyset() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
assert!(matches!(
ScopedSignalHandler::new::<TestHandler>(TEST_SIGNALS),
Err(Error::HandlerAlreadySet(Signal::User1))
));
assert_counter_eq!(0);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
/// Stores the thread used by WaitForInterruptHandler.
static WAIT_FOR_INTERRUPT_THREAD_ID: AtomicI32 = AtomicI32::new(0);
/// Forwards SIGINT to the appropriate thread.
struct WaitForInterruptHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for WaitForInterruptHandler {
fn handle_signal(_: Signal) {
let tid = WAIT_FOR_INTERRUPT_THREAD_ID.load(Ordering::SeqCst);
// If the thread ID is set and executed on the wrong thread, forward the signal.
if tid != 0 && gettid() != tid {
// Safe because the handler is safe and the target thread id is expecting the signal.
unsafe { kill(tid, Signal::Interrupt.into()) }.unwrap();
}
}
}
/// Query /proc/${tid}/status for its State and check if it is either S (sleeping) or in
/// D (disk sleep).
fn thread_is_sleeping(tid: Pid) -> result::Result<bool, errno::Error> {
const PREFIX: &str = "State:";
let mut status_reader = BufReader::new(File::open(format!("/proc/{}/status", tid))?);
let mut line = String::new();
loop {
let count = status_reader.read_line(&mut line)?;
if count == 0 {
return Err(errno::Error::new(libc::EIO));
}
if let Some(stripped) = line.strip_prefix(PREFIX) |
line.clear();
}
}
/// Wait for a process to block either in a sleeping or disk sleep state.
fn wait_for_thread_to_sleep(tid: Pid, timeout: Duration) -> result::Result<(), errno::Error> {
let start = Instant::now();
loop {
if thread_is_sleeping(tid)? {
return Ok(());
}
if start.elapsed() > timeout {
return Err(errno::Error::new(libc::EAGAIN));
}
sleep(Duration::from_millis(50));
}
}
#[test]
fn waitforinterrupt_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
let to_restore = get_sigaction(Signal::Interrupt).unwrap();
clear_signal_handler(Signal::Interrupt.into()).unwrap();
// Safe because TestHandler is async-signal safe.
let handler =
ScopedSignalHandler::new::<WaitForInterruptHandler>(&[Signal::Interrupt]).unwrap();
let tid = gettid();
WAIT_FOR_INTERRUPT_THREAD_ID.store(tid, Ordering::SeqCst);
let join_handle = spawn(move || -> result::Result<(), errno::Error> {
// Wait unitl the thread is ready to receive the signal.
wait_for_thread_to_sleep(tid, Duration::from_secs(10)).unwrap();
// Safe because the SIGINT handler is safe.
unsafe { kill(tid, Signal::Interrupt.into()) }
});
let wait_ret = wait_for_interrupt();
let join_ret = join_handle.join();
drop(handler);
// Safe because we are restoring the previous SIGINT handler.
unsafe { restore_sigaction(Signal::Interrupt, to_restore) }.unwrap();
wait_ret.unwrap();
join_ret.unwrap().unwrap();
}
}
| {
return Ok(matches!(
stripped.trim_start().chars().next(),
Some('S') | Some('D')
));
} | conditional_block |
scoped_signal_handler.rs | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provides a struct for registering signal handlers that get cleared on drop.
use std::convert::TryFrom;
use std::fmt;
use std::io::{Cursor, Write};
use std::panic::catch_unwind;
use std::result;
use libc::{c_int, c_void, STDERR_FILENO};
use remain::sorted;
use thiserror::Error;
use crate::errno;
use crate::signal::{
clear_signal_handler, has_default_signal_handler, register_signal_handler, wait_for_signal,
Signal,
};
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
/// Already waiting for interrupt.
#[error("already waiting for interrupt.")]
AlreadyWaiting,
/// Signal already has a handler.
#[error("signal handler already set for {0:?}")]
HandlerAlreadySet(Signal),
/// Failed to check if signal has the default signal handler.
#[error("failed to check the signal handler for {0:?}: {1}")]
HasDefaultSignalHandler(Signal, errno::Error),
/// Failed to register a signal handler.
#[error("failed to register a signal handler for {0:?}: {1}")]
RegisterSignalHandler(Signal, errno::Error),
/// Sigaction failed.
#[error("sigaction failed for {0:?}: {1}")]
Sigaction(Signal, errno::Error),
/// Failed to wait for signal.
#[error("wait_for_signal failed: {0}")]
WaitForSignal(errno::Error),
}
pub type Result<T> = result::Result<T, Error>;
/// The interface used by Scoped Signal handler.
///
/// # Safety
/// The implementation of handle_signal needs to be async signal-safe.
///
/// NOTE: panics are caught when possible because a panic inside ffi is undefined behavior.
pub unsafe trait SignalHandler {
/// A function that is called to handle the passed signal.
fn handle_signal(signal: Signal);
}
/// Wrap the handler with an extern "C" function.
extern "C" fn call_handler<H: SignalHandler>(signum: c_int) {
// Make an effort to surface an error.
if catch_unwind(|| H::handle_signal(Signal::try_from(signum).unwrap())).is_err() {
// Note the following cannot be used: | // format! - uses the allocator which enforces mutual exclusion.
// Get the debug representation of signum.
let signal: Signal;
let signal_debug: &dyn fmt::Debug = match Signal::try_from(signum) {
Ok(s) => {
signal = s;
&signal as &dyn fmt::Debug
}
Err(_) => &signum as &dyn fmt::Debug,
};
// Buffer the output, so a single call to write can be used.
// The message accounts for 29 chars, that leaves 35 for the string representation of the
// signal which is more than enough.
let mut buffer = [0u8; 64];
let mut cursor = Cursor::new(buffer.as_mut());
if writeln!(cursor, "signal handler got error for: {:?}", signal_debug).is_ok() {
let len = cursor.position() as usize;
// Safe in the sense that buffer is owned and the length is checked. This may print in
// the middle of an existing write, but that is considered better than dropping the
// error.
unsafe {
libc::write(
STDERR_FILENO,
cursor.get_ref().as_ptr() as *const c_void,
len,
)
};
} else {
// This should never happen, but write an error message just in case.
const ERROR_DROPPED: &str = "Error dropped by signal handler.";
let bytes = ERROR_DROPPED.as_bytes();
unsafe { libc::write(STDERR_FILENO, bytes.as_ptr() as *const c_void, bytes.len()) };
}
}
}
/// Represents a signal handler that is registered with a set of signals that unregistered when the
/// struct goes out of scope. Prefer a signalfd based solution before using this.
pub struct ScopedSignalHandler {
signals: Vec<Signal>,
}
impl ScopedSignalHandler {
/// Attempts to register `handler` with the provided `signals`. It will fail if there is already
/// an existing handler on any of `signals`.
///
/// # Safety
/// This is safe if H::handle_signal is async-signal safe.
pub fn new<H: SignalHandler>(signals: &[Signal]) -> Result<Self> {
let mut scoped_handler = ScopedSignalHandler {
signals: Vec::with_capacity(signals.len()),
};
for &signal in signals {
if !has_default_signal_handler((signal).into())
.map_err(|err| Error::HasDefaultSignalHandler(signal, err))?
{
return Err(Error::HandlerAlreadySet(signal));
}
// Requires an async-safe callback.
unsafe {
register_signal_handler((signal).into(), call_handler::<H>)
.map_err(|err| Error::RegisterSignalHandler(signal, err))?
};
scoped_handler.signals.push(signal);
}
Ok(scoped_handler)
}
}
/// Clears the signal handler for any of the associated signals.
impl Drop for ScopedSignalHandler {
fn drop(&mut self) {
for signal in &self.signals {
if let Err(err) = clear_signal_handler((*signal).into()) {
eprintln!("Error: failed to clear signal handler: {:?}", err);
}
}
}
}
/// A signal handler that does nothing.
///
/// This is useful in cases where wait_for_signal is used since it will never trigger if the signal
/// is blocked and the default handler may have undesired effects like terminating the process.
pub struct EmptySignalHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for EmptySignalHandler {
fn handle_signal(_: Signal) {}
}
/// Blocks until SIGINT is received, which often happens because Ctrl-C was pressed in an
/// interactive terminal.
///
/// Note: if you are using a multi-threaded application you need to block SIGINT on all other
/// threads or they may receive the signal instead of the desired thread.
pub fn wait_for_interrupt() -> Result<()> {
// Register a signal handler if there is not one already so the thread is not killed.
let ret = ScopedSignalHandler::new::<EmptySignalHandler>(&[Signal::Interrupt]);
if !matches!(&ret, Ok(_) | Err(Error::HandlerAlreadySet(_))) {
ret?;
}
match wait_for_signal(&[Signal::Interrupt.into()], None) {
Ok(_) => Ok(()),
Err(err) => Err(Error::WaitForSignal(err)),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::mem::zeroed;
use std::ptr::{null, null_mut};
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, Once};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use libc::sigaction;
use crate::{gettid, kill, Pid};
const TEST_SIGNAL: Signal = Signal::User1;
const TEST_SIGNALS: &[Signal] = &[Signal::User1, Signal::User2];
static TEST_SIGNAL_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Only allows one test case to execute at a time.
fn get_mutex() -> MutexGuard<'static, ()> {
static INIT: Once = Once::new();
static mut VAL: Option<Arc<Mutex<()>>> = None;
INIT.call_once(|| {
let val = Some(Arc::new(Mutex::new(())));
// Safe because the mutation is protected by the Once.
unsafe { VAL = val }
});
// Safe mutation only happens in the Once.
unsafe { VAL.as_ref() }.unwrap().lock().unwrap()
}
fn reset_counter() {
TEST_SIGNAL_COUNTER.swap(0, Ordering::SeqCst);
}
fn get_sigaction(signal: Signal) -> Result<sigaction> {
// Safe because sigaction is owned and expected to be initialized ot zeros.
let mut sigact: sigaction = unsafe { zeroed() };
if unsafe { sigaction(signal.into(), null(), &mut sigact) } < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// This is only safe if the signal handler set in sigaction is safe.
unsafe fn restore_sigaction(signal: Signal, sigact: sigaction) -> Result<sigaction> {
if sigaction(signal.into(), &sigact, null_mut()) < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// Safe if the signal handler for Signal::User1 is safe.
unsafe fn send_test_signal() {
kill(gettid(), Signal::User1.into()).unwrap()
}
macro_rules! assert_counter_eq {
($compare_to:expr) => {{
let expected: usize = $compare_to;
let got: usize = TEST_SIGNAL_COUNTER.load(Ordering::SeqCst);
if got != expected {
panic!(
"wrong signal counter value: got {}; expected {}",
got, expected
);
}
}};
}
struct TestHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for TestHandler {
fn handle_signal(signal: Signal) {
if TEST_SIGNAL == signal {
TEST_SIGNAL_COUNTER.fetch_add(1, Ordering::SeqCst);
}
}
}
#[test]
fn scopedsignalhandler_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because test_handler is safe.
unsafe { send_test_signal() };
// Give the handler time to run in case it is on a different thread.
for _ in 1..40 {
if TEST_SIGNAL_COUNTER.load(Ordering::SeqCst) > 0 {
break;
}
sleep(Duration::from_millis(250));
}
assert_counter_eq!(1);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
#[test]
fn scopedsignalhandler_handleralreadyset() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
assert!(matches!(
ScopedSignalHandler::new::<TestHandler>(TEST_SIGNALS),
Err(Error::HandlerAlreadySet(Signal::User1))
));
assert_counter_eq!(0);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
/// Stores the thread used by WaitForInterruptHandler.
static WAIT_FOR_INTERRUPT_THREAD_ID: AtomicI32 = AtomicI32::new(0);
/// Forwards SIGINT to the appropriate thread.
struct WaitForInterruptHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for WaitForInterruptHandler {
fn handle_signal(_: Signal) {
let tid = WAIT_FOR_INTERRUPT_THREAD_ID.load(Ordering::SeqCst);
// If the thread ID is set and executed on the wrong thread, forward the signal.
if tid != 0 && gettid() != tid {
// Safe because the handler is safe and the target thread id is expecting the signal.
unsafe { kill(tid, Signal::Interrupt.into()) }.unwrap();
}
}
}
/// Query /proc/${tid}/status for its State and check if it is either S (sleeping) or in
/// D (disk sleep).
fn thread_is_sleeping(tid: Pid) -> result::Result<bool, errno::Error> {
const PREFIX: &str = "State:";
let mut status_reader = BufReader::new(File::open(format!("/proc/{}/status", tid))?);
let mut line = String::new();
loop {
let count = status_reader.read_line(&mut line)?;
if count == 0 {
return Err(errno::Error::new(libc::EIO));
}
if let Some(stripped) = line.strip_prefix(PREFIX) {
return Ok(matches!(
stripped.trim_start().chars().next(),
Some('S') | Some('D')
));
}
line.clear();
}
}
/// Wait for a process to block either in a sleeping or disk sleep state.
fn wait_for_thread_to_sleep(tid: Pid, timeout: Duration) -> result::Result<(), errno::Error> {
let start = Instant::now();
loop {
if thread_is_sleeping(tid)? {
return Ok(());
}
if start.elapsed() > timeout {
return Err(errno::Error::new(libc::EAGAIN));
}
sleep(Duration::from_millis(50));
}
}
#[test]
fn waitforinterrupt_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
let to_restore = get_sigaction(Signal::Interrupt).unwrap();
clear_signal_handler(Signal::Interrupt.into()).unwrap();
// Safe because TestHandler is async-signal safe.
let handler =
ScopedSignalHandler::new::<WaitForInterruptHandler>(&[Signal::Interrupt]).unwrap();
let tid = gettid();
WAIT_FOR_INTERRUPT_THREAD_ID.store(tid, Ordering::SeqCst);
let join_handle = spawn(move || -> result::Result<(), errno::Error> {
// Wait unitl the thread is ready to receive the signal.
wait_for_thread_to_sleep(tid, Duration::from_secs(10)).unwrap();
// Safe because the SIGINT handler is safe.
unsafe { kill(tid, Signal::Interrupt.into()) }
});
let wait_ret = wait_for_interrupt();
let join_ret = join_handle.join();
drop(handler);
// Safe because we are restoring the previous SIGINT handler.
unsafe { restore_sigaction(Signal::Interrupt, to_restore) }.unwrap();
wait_ret.unwrap();
join_ret.unwrap().unwrap();
}
} | // eprintln! - uses std::io which has locks that may be held. | random_line_split |
scoped_signal_handler.rs | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provides a struct for registering signal handlers that get cleared on drop.
use std::convert::TryFrom;
use std::fmt;
use std::io::{Cursor, Write};
use std::panic::catch_unwind;
use std::result;
use libc::{c_int, c_void, STDERR_FILENO};
use remain::sorted;
use thiserror::Error;
use crate::errno;
use crate::signal::{
clear_signal_handler, has_default_signal_handler, register_signal_handler, wait_for_signal,
Signal,
};
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
/// Already waiting for interrupt.
#[error("already waiting for interrupt.")]
AlreadyWaiting,
/// Signal already has a handler.
#[error("signal handler already set for {0:?}")]
HandlerAlreadySet(Signal),
/// Failed to check if signal has the default signal handler.
#[error("failed to check the signal handler for {0:?}: {1}")]
HasDefaultSignalHandler(Signal, errno::Error),
/// Failed to register a signal handler.
#[error("failed to register a signal handler for {0:?}: {1}")]
RegisterSignalHandler(Signal, errno::Error),
/// Sigaction failed.
#[error("sigaction failed for {0:?}: {1}")]
Sigaction(Signal, errno::Error),
/// Failed to wait for signal.
#[error("wait_for_signal failed: {0}")]
WaitForSignal(errno::Error),
}
pub type Result<T> = result::Result<T, Error>;
/// The interface used by Scoped Signal handler.
///
/// # Safety
/// The implementation of handle_signal needs to be async signal-safe.
///
/// NOTE: panics are caught when possible because a panic inside ffi is undefined behavior.
pub unsafe trait SignalHandler {
/// A function that is called to handle the passed signal.
fn handle_signal(signal: Signal);
}
/// Wrap the handler with an extern "C" function.
// Trampoline registered with the kernel; it forwards to H::handle_signal.
// catch_unwind is mandatory: unwinding across an extern "C" boundary is
// undefined behavior.
extern "C" fn call_handler<H: SignalHandler>(signum: c_int) {
// Make an effort to surface an error.
if catch_unwind(|| H::handle_signal(Signal::try_from(signum).unwrap())).is_err() {
// Note the following cannot be used:
// eprintln! - uses std::io which has locks that may be held.
// format! - uses the allocator which enforces mutual exclusion.
// Get the debug representation of signum.
let signal: Signal;
let signal_debug: &dyn fmt::Debug = match Signal::try_from(signum) {
Ok(s) => {
signal = s;
&signal as &dyn fmt::Debug
}
Err(_) => &signum as &dyn fmt::Debug,
};
// Buffer the output, so a single call to write can be used.
// The message accounts for 29 chars, that leaves 35 for the string representation of the
// signal which is more than enough.
let mut buffer = [0u8; 64];
let mut cursor = Cursor::new(buffer.as_mut());
if writeln!(cursor, "signal handler got error for: {:?}", signal_debug).is_ok() {
let len = cursor.position() as usize;
// Safe in the sense that buffer is owned and the length is checked. This may print in
// the middle of an existing write, but that is considered better than dropping the
// error.
unsafe {
libc::write(
STDERR_FILENO,
cursor.get_ref().as_ptr() as *const c_void,
len,
)
};
} else {
// This should never happen, but write an error message just in case.
const ERROR_DROPPED: &str = "Error dropped by signal handler.";
let bytes = ERROR_DROPPED.as_bytes();
unsafe { libc::write(STDERR_FILENO, bytes.as_ptr() as *const c_void, bytes.len()) };
}
}
}
/// Represents a signal handler that is registered with a set of signals that unregistered when the
/// struct goes out of scope. Prefer a signalfd based solution before using this.
pub struct ScopedSignalHandler {
signals: Vec<Signal>,
}
impl ScopedSignalHandler {
/// Attempts to register `handler` with the provided `signals`. It will fail if there is already
/// an existing handler on any of `signals`.
///
/// # Safety
/// This is safe if H::handle_signal is async-signal safe.
pub fn new<H: SignalHandler>(signals: &[Signal]) -> Result<Self> {
let mut scoped_handler = ScopedSignalHandler {
signals: Vec::with_capacity(signals.len()),
};
for &signal in signals {
if !has_default_signal_handler((signal).into())
.map_err(|err| Error::HasDefaultSignalHandler(signal, err))?
{
return Err(Error::HandlerAlreadySet(signal));
}
// Requires an async-safe callback.
unsafe {
register_signal_handler((signal).into(), call_handler::<H>)
.map_err(|err| Error::RegisterSignalHandler(signal, err))?
};
scoped_handler.signals.push(signal);
}
Ok(scoped_handler)
}
}
/// Unregisters the handler for every signal this guard registered,
/// restoring the default disposition when it goes out of scope.
impl Drop for ScopedSignalHandler {
    fn drop(&mut self) {
        for &signal in self.signals.iter() {
            if let Err(err) = clear_signal_handler(signal.into()) {
                eprintln!("Error: failed to clear signal handler: {:?}", err);
            }
        }
    }
}
/// A signal handler that does nothing.
///
/// This is useful in cases where wait_for_signal is used since it will never trigger if the signal
/// is blocked and the default handler may have undesired effects like terminating the process.
pub struct | ;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for EmptySignalHandler {
fn handle_signal(_: Signal) {}
}
/// Blocks until SIGINT is received, which often happens because Ctrl-C was pressed in an
/// interactive terminal.
///
/// Note: if you are using a multi-threaded application you need to block SIGINT on all other
/// threads or they may receive the signal instead of the desired thread.
pub fn wait_for_interrupt() -> Result<()> {
// Register a signal handler if there is not one already so the thread is not killed.
let ret = ScopedSignalHandler::new::<EmptySignalHandler>(&[Signal::Interrupt]);
if !matches!(&ret, Ok(_) | Err(Error::HandlerAlreadySet(_))) {
ret?;
}
// NOTE: `ret` may own the ScopedSignalHandler and must stay alive until
// wait_for_signal returns; dropping it early would unregister the handler.
match wait_for_signal(&[Signal::Interrupt.into()], None) {
Ok(_) => Ok(()),
Err(err) => Err(Error::WaitForSignal(err)),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::mem::zeroed;
use std::ptr::{null, null_mut};
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, Once};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use libc::sigaction;
use crate::{gettid, kill, Pid};
const TEST_SIGNAL: Signal = Signal::User1;
const TEST_SIGNALS: &[Signal] = &[Signal::User1, Signal::User2];
static TEST_SIGNAL_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Only allows one test case to execute at a time.
fn get_mutex() -> MutexGuard<'static, ()> {
static INIT: Once = Once::new();
static mut VAL: Option<Arc<Mutex<()>>> = None;
INIT.call_once(|| {
let val = Some(Arc::new(Mutex::new(())));
// Safe because the mutation is protected by the Once.
unsafe { VAL = val }
});
// Safe mutation only happens in the Once.
unsafe { VAL.as_ref() }.unwrap().lock().unwrap()
}
/// Resets the shared test counter to zero before a test runs.
fn reset_counter() {
    // `swap` returned the previous value, which was discarded; a plain
    // `store` expresses the intent directly.
    TEST_SIGNAL_COUNTER.store(0, Ordering::SeqCst);
}
// Returns the current sigaction registered for `signal` without changing it.
fn get_sigaction(signal: Signal) -> Result<sigaction> {
// Safe because sigaction is owned and expected to be initialized to zeros.
let mut sigact: sigaction = unsafe { zeroed() };
// A null `act` pointer queries the existing disposition only.
if unsafe { sigaction(signal.into(), null(), &mut sigact) } < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// This is only safe if the signal handler set in sigaction is safe.
unsafe fn restore_sigaction(signal: Signal, sigact: sigaction) -> Result<sigaction> {
if sigaction(signal.into(), &sigact, null_mut()) < 0 {
Err(Error::Sigaction(signal, errno::Error::last()))
} else {
Ok(sigact)
}
}
/// Safety:
/// Safe if the signal handler for Signal::User1 is safe.
unsafe fn send_test_signal() {
kill(gettid(), Signal::User1.into()).unwrap()
}
macro_rules! assert_counter_eq {
($compare_to:expr) => {{
let expected: usize = $compare_to;
let got: usize = TEST_SIGNAL_COUNTER.load(Ordering::SeqCst);
if got != expected {
panic!(
"wrong signal counter value: got {}; expected {}",
got, expected
);
}
}};
}
struct TestHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for TestHandler {
fn handle_signal(signal: Signal) {
if TEST_SIGNAL == signal {
TEST_SIGNAL_COUNTER.fetch_add(1, Ordering::SeqCst);
}
}
}
#[test]
fn scopedsignalhandler_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because test_handler is safe.
unsafe { send_test_signal() };
// Give the handler time to run in case it is on a different thread.
for _ in 1..40 {
if TEST_SIGNAL_COUNTER.load(Ordering::SeqCst) > 0 {
break;
}
sleep(Duration::from_millis(250));
}
assert_counter_eq!(1);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
#[test]
fn scopedsignalhandler_handleralreadyset() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
reset_counter();
assert_counter_eq!(0);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
let handler = ScopedSignalHandler::new::<TestHandler>(&[TEST_SIGNAL]).unwrap();
assert!(!has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
// Safe because TestHandler is async-signal safe.
assert!(matches!(
ScopedSignalHandler::new::<TestHandler>(TEST_SIGNALS),
Err(Error::HandlerAlreadySet(Signal::User1))
));
assert_counter_eq!(0);
drop(handler);
assert!(has_default_signal_handler(TEST_SIGNAL.into()).unwrap());
}
/// Stores the thread used by WaitForInterruptHandler.
static WAIT_FOR_INTERRUPT_THREAD_ID: AtomicI32 = AtomicI32::new(0);
/// Forwards SIGINT to the appropriate thread.
struct WaitForInterruptHandler;
/// # Safety
/// Safe because handle_signal is async-signal safe.
unsafe impl SignalHandler for WaitForInterruptHandler {
fn handle_signal(_: Signal) {
let tid = WAIT_FOR_INTERRUPT_THREAD_ID.load(Ordering::SeqCst);
// If the thread ID is set and executed on the wrong thread, forward the signal.
if tid != 0 && gettid() != tid {
// Safe because the handler is safe and the target thread id is expecting the signal.
unsafe { kill(tid, Signal::Interrupt.into()) }.unwrap();
}
}
}
/// Query /proc/${tid}/status for its State and check if it is either S (sleeping) or in
/// D (disk sleep).
fn thread_is_sleeping(tid: Pid) -> result::Result<bool, errno::Error> {
const PREFIX: &str = "State:";
let mut status_reader = BufReader::new(File::open(format!("/proc/{}/status", tid))?);
let mut line = String::new();
loop {
let count = status_reader.read_line(&mut line)?;
if count == 0 {
// Reached EOF without ever seeing a "State:" line; report an I/O error.
return Err(errno::Error::new(libc::EIO));
}
if let Some(stripped) = line.strip_prefix(PREFIX) {
// 'S' = interruptible sleep, 'D' = uninterruptible (disk) sleep.
return Ok(matches!(
stripped.trim_start().chars().next(),
Some('S') | Some('D')
));
}
line.clear();
}
}
/// Wait for a process to block either in a sleeping or disk sleep state.
fn wait_for_thread_to_sleep(tid: Pid, timeout: Duration) -> result::Result<(), errno::Error> {
let start = Instant::now();
loop {
if thread_is_sleeping(tid)? {
return Ok(());
}
// Give up once the timeout elapses so a stuck thread cannot hang the test.
if start.elapsed() > timeout {
return Err(errno::Error::new(libc::EAGAIN));
}
// Poll roughly every 50ms.
sleep(Duration::from_millis(50));
}
}
#[test]
fn waitforinterrupt_success() {
// Prevent other test cases from running concurrently since the signal
// handlers are shared for the process.
let _guard = get_mutex();
// Remember the current SIGINT disposition so it can be restored at the end.
let to_restore = get_sigaction(Signal::Interrupt).unwrap();
clear_signal_handler(Signal::Interrupt.into()).unwrap();
// Safe because WaitForInterruptHandler is async-signal safe.
let handler =
ScopedSignalHandler::new::<WaitForInterruptHandler>(&[Signal::Interrupt]).unwrap();
let tid = gettid();
WAIT_FOR_INTERRUPT_THREAD_ID.store(tid, Ordering::SeqCst);
let join_handle = spawn(move || -> result::Result<(), errno::Error> {
// Wait until the thread is ready to receive the signal.
wait_for_thread_to_sleep(tid, Duration::from_secs(10)).unwrap();
// Safe because the SIGINT handler is safe.
unsafe { kill(tid, Signal::Interrupt.into()) }
});
let wait_ret = wait_for_interrupt();
let join_ret = join_handle.join();
drop(handler);
// Safe because we are restoring the previous SIGINT handler.
unsafe { restore_sigaction(Signal::Interrupt, to_restore) }.unwrap();
wait_ret.unwrap();
join_ret.unwrap().unwrap();
}
}
| EmptySignalHandler | identifier_name |
twitter.go | package core
import (
"context"
"errors"
"fmt"
"net/url"
"strings"
"github.com/iwataka/anaconda"
"github.com/iwataka/mybot/data"
"github.com/iwataka/mybot/models"
"github.com/iwataka/mybot/oauth"
"github.com/iwataka/mybot/utils"
"github.com/slack-go/slack"
)
// TwitterAPI is a wrapper of anaconda.TwitterApi.
type TwitterAPI struct {
api models.TwitterAPI
config Config
cache data.Cache
self *anaconda.User
}
// NewTwitterAPIWithAuth takes a user's authentication, cache and configuration and
// returns TwitterAPI instance for that user
func NewTwitterAPIWithAuth(auth oauth.OAuthCreds, config Config, cache data.Cache) *TwitterAPI {
at, ats := auth.GetCreds()
var api models.TwitterAPI
if len(at) > 0 && len(ats) > 0 {
api = anaconda.NewTwitterApi(at, ats)
}
return NewTwitterAPI(api, config, cache)
}
func NewTwitterAPI(api models.TwitterAPI, config Config, cache data.Cache) *TwitterAPI {
return &TwitterAPI{api, config, cache, nil}
}
func (a *TwitterAPI) BaseAPI() models.TwitterAPI {
return a.api
}
func (a *TwitterAPI) VerifyCredentials() (bool, error) {
if a.Enabled() {
return a.api.VerifyCredentials()
}
return false, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) PostSlackMsg(text string, atts []slack.Attachment) (anaconda.Tweet, error) {
return a.api.PostTweet(text, nil)
}
// GetSelf gets the authenticated user's information and stores it as a cache,
// then returns it.
func (a *TwitterAPI) GetSelf() (anaconda.User, error) {
if a.self != nil {
return *a.self, nil
}
if a.Enabled() {
self, err := a.api.GetSelf(nil)
if err != nil {
return anaconda.User{}, utils.WithStack(err)
}
a.self = &self
return self, nil
}
return anaconda.User{}, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) Enabled() bool {
return a.api != nil
}
// CheckUser cheks if user is matched for the given allowSelf and users
// arguments.
func (a *TwitterAPI) CheckUser(user string, allowSelf bool, users []string) (bool, error) {
if allowSelf {
self, err := a.GetSelf()
if err != nil {
return false, utils.WithStack(err)
}
if user == self.ScreenName {
return true, nil
}
}
for _, u := range users {
if user == u {
return true, nil
}
}
return false, nil
}
// ProcessFavorites gets tweets from the specified user's favorite list and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessFavorites(
name string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
latestID := a.cache.GetLatestFavoriteID(name)
v.Set("screen_name", name)
if latestID > 0 {
v.Set("since_id", fmt.Sprintf("%d", latestID))
} else {
// If the latest favorite ID doesn't exist, this fetches just
// the latest tweet and store that ID.
v.Set("count", "1")
}
tweets, err := a.api.GetFavorites(v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
var pp TwitterPostProcessor
if c.ShouldRepeat() {
pp = &TwitterPostProcessorEach{action, a.cache}
} else {
pp = &TwitterPostProcessorTop{action, name, a.cache}
}
processedTweets, processedActions, err := a.processTweets(tweets, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, nil
}
// ProcessSearch gets tweets from search result by the specified query and does
// the action for tweets filtered by c.
func (a *TwitterAPI) ProcessSearch(
	query string,
	v url.Values,
	c TweetChecker,
	vision VisionMatcher,
	lang LanguageMatcher,
	slack *SlackAPI,
	action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
	res, err := a.GetSearch(query, v)
	if err != nil {
		return nil, nil, utils.WithStack(err)
	}
	// Search results may repeat across calls, so each matching tweet is
	// post-processed individually rather than tracking only the newest ID.
	pp := &TwitterPostProcessorEach{action, a.cache}
	processedTweets, processedActions, err := a.processTweets(res.Statuses, c, vision, lang, slack, action, pp)
	if err != nil {
		return nil, nil, utils.WithStack(err)
	}
	// err is necessarily nil here; return nil explicitly instead of wrapping
	// it, consistent with ProcessFavorites.
	return processedTweets, processedActions, nil
}
type (
TwitterPostProcessor interface {
Process(anaconda.Tweet, bool) error
}
TwitterPostProcessorTop struct {
action data.Action
screenName string
cache data.Cache
}
TwitterPostProcessorEach struct {
action data.Action
cache data.Cache
}
)
func (p *TwitterPostProcessorTop) Process(t anaconda.Tweet, match bool) error {
id := p.cache.GetLatestTweetID(p.screenName)
if t.Id > id {
p.cache.SetLatestTweetID(p.screenName, t.Id)
}
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (p *TwitterPostProcessorEach) Process(t anaconda.Tweet, match bool) error {
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (a *TwitterAPI) processTweets(
tweets []anaconda.Tweet,
c TweetChecker,
v VisionMatcher,
l LanguageMatcher,
slack *SlackAPI,
action data.Action,
pp TwitterPostProcessor,
) ([]anaconda.Tweet, []data.Action, error) {
processedTweets := []anaconda.Tweet{}
processedActions := []data.Action{}
// From the oldest to the newest
for i := len(tweets) - 1; i >= 0; i-- {
t := tweets[i]
match, err := c.CheckTweet(t, v, l, a.cache)
if err != nil {
return nil, nil, utils.WithStack(err)
}
if match {
done := a.cache.GetTweetAction(t.Id)
undone := action.Sub(done)
err = a.processTweet(t, undone, slack)
if err != nil {
return nil, nil, utils.WithStack(err)
}
processedTweets = append(processedTweets, t)
processedActions = append(processedActions, undone)
}
err = pp.Process(t, match)
if err != nil {
return nil, nil, utils.WithStack(err)
}
}
return processedTweets, processedActions, nil
}
func (a *TwitterAPI) processTweet(
t anaconda.Tweet,
action data.Action,
slack *SlackAPI,
) error {
if action.Twitter.Retweet && !t.Retweeted {
var id int64
if t.RetweetedStatus == nil {
id = t.Id
} else {
id = t.RetweetedStatus.Id
}
_, err := a.api.Retweet(id, false)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if action.Twitter.Favorite && !t.Favorited {
id := t.Id
_, err := a.api.Favorite(id)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
for _, col := range action.Twitter.Collections {
err := a.collectTweet(t, col)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if slack.Enabled() {
for _, ch := range action.Slack.Channels {
err := slack.PostTweet(ch, t)
if CheckSlackError(err) {
return utils.WithStack(err)
}
}
}
return nil
}
func (a *TwitterAPI) collectTweet(tweet anaconda.Tweet, collection string) error |
func (a *TwitterAPI) GetSearch(query string, url url.Values) (anaconda.SearchResponse, error) {
return a.api.GetSearch(query, url)
}
func (a *TwitterAPI) GetUserSearch(searchTerm string, v url.Values) ([]anaconda.User, error) {
return a.api.GetUserSearch(searchTerm, v)
}
func (a *TwitterAPI) GetFavorites(vals url.Values) ([]anaconda.Tweet, error) {
return a.api.GetFavorites(vals)
}
type TwitterUserListener struct {
stream *anaconda.Stream
api *TwitterAPI
vis VisionMatcher
lang LanguageMatcher
slack *SlackAPI
cache data.Cache
}
// ListenUsers listens timelines of the friends
func (a *TwitterAPI) ListenUsers(
v url.Values,
vis VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
cache data.Cache,
) (*TwitterUserListener, error) {
if v == nil {
v = url.Values{}
}
names := a.config.GetTwitterScreenNames()
usernames := strings.Join(names, ",")
if len(usernames) == 0 {
return nil, errors.New("No user specified")
} else {
users, err := a.api.GetUsersLookup(usernames, nil)
if err != nil {
return nil, utils.WithStack(err)
}
userids := []string{}
for _, u := range users {
userids = append(userids, u.IdStr)
}
v.Set("follow", strings.Join(userids, ","))
stream := a.api.PublicStreamFilter(v)
return &TwitterUserListener{stream, a, vis, lang, slack, cache}, nil
}
}
func (l *TwitterUserListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
for {
select {
case msg := <-l.stream.C:
err := l.processMessage(msg, outChan)
if err != nil {
return utils.WithStack(err)
}
case <-ctx.Done():
return nil
}
}
}
func (l *TwitterUserListener) processMessage(msg interface{}, outChan chan<- interface{}) error {
switch m := msg.(type) {
case anaconda.Tweet:
name := m.User.ScreenName
timelines := l.api.config.GetTwitterTimelinesByScreenName(name)
if len(timelines) != 0 {
outChan <- NewReceivedEvent(TwitterEventType, "tweet", m)
}
for _, timeline := range timelines {
if !checkTweetByTimelineConfig(m, timeline) {
continue
}
match, err := timeline.Filter.CheckTweet(m, l.vis, l.lang, l.cache)
if err != nil {
return utils.WithStack(err)
}
if !match {
continue
}
done := l.api.cache.GetTweetAction(m.Id)
undone := timeline.Action.Sub(done)
if err := l.api.processTweet(m, undone, l.slack); err != nil {
return utils.WithStack(err)
}
outChan <- NewActionEvent(undone, m)
l.api.cache.SetLatestTweetID(name, m.Id)
}
err := l.api.cache.Save()
if err != nil {
return utils.WithStack(err)
}
}
return nil
}
func (l *TwitterUserListener) Stop() {
l.stream.Stop()
}
func checkTweetByTimelineConfig(t anaconda.Tweet, c TimelineConfig) bool {
if c.ExcludeReplies && t.InReplyToScreenName != "" {
return false
}
if !c.IncludeRts && t.RetweetedStatus != nil {
return false
}
return true
}
type TwitterDMListener struct {
stream *anaconda.Stream
api *TwitterAPI
}
// ListenMyself listens to the authenticated user by Twitter's User Streaming
// API and reacts with direct messages.
func (a *TwitterAPI) ListenMyself(v url.Values) (*TwitterDMListener, error) {
ok, err := a.VerifyCredentials()
if err != nil {
return nil, utils.WithStack(err)
} else if !ok {
return nil, errors.New("Twitter Account Verification failed")
}
stream := a.api.UserStream(v)
return &TwitterDMListener{stream, a}, nil
}
func (l *TwitterDMListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
// TODO: Twitter User Stream API has been retired, so I temporarily disable this feature.
// Later I completely remove this feature.
// https://developer.twitter.com/en/docs/twitter-api/enterprise/account-activity-api/migration/us-ss-migration-guide
return nil
// for {
// select {
// case msg := <-l.stream.C:
// switch c := msg.(type) {
// case anaconda.DirectMessage:
// outChan <- NewReceivedEvent(TwitterEventType, "DM", c)
// // TODO: Handle direct messages in the same way as the other sources
// id := l.api.cache.GetLatestDMID()
// if id < c.Id {
// l.api.cache.SetLatestDMID(c.Id)
// }
// err := l.api.cache.Save()
// if err != nil {
// return utils.WithStack(err)
// }
// }
// case <-ctx.Done():
// return nil
// }
// }
}
func (l *TwitterDMListener) Stop() {
l.stream.Stop()
}
// TweetChecker function checks if the specified tweet is acceptable, which means it
// should be retweeted.
type TweetChecker interface {
CheckTweet(t anaconda.Tweet, v VisionMatcher, l LanguageMatcher, c data.Cache) (bool, error)
ShouldRepeat() bool
}
func CheckTwitterError(err error) bool {
if err == nil {
return false
}
switch twitterErr := err.(type) {
case *anaconda.TwitterError:
// https://developer.twitter.com/ja/docs/basics/response-codes
// 130: Over capacity
// 131: Internal error
// 139: You have already favorited this status.
// 187: The status text has already been Tweeted by the authenticated account.
// 327: You have already retweeted this tweet.
switch twitterErr.Code {
case 130, 131, 139, 187, 327:
return false
}
case anaconda.TwitterError:
return CheckTwitterError(&twitterErr)
case *anaconda.ApiError:
code := twitterErr.StatusCode
// Status code 5?? means server error
if code >= 500 && code < 600 {
return false
}
for _, e := range twitterErr.Decoded.Errors {
if CheckTwitterError(e) {
return true
}
}
return false
case anaconda.ApiError:
return CheckTwitterError(&twitterErr)
}
return true
}
func TwitterStatusURL(t anaconda.Tweet) string {
srcFmt := "https://twitter.com/%s/status/%s"
return fmt.Sprintf(srcFmt, t.User.IdStr, t.IdStr)
}
| {
self, err := a.GetSelf()
if err != nil {
return utils.WithStack(err)
}
list, err := a.api.GetCollectionListByUserId(self.Id, nil)
if err != nil {
return utils.WithStack(err)
}
exists := false
var id string
for i, t := range list.Objects.Timelines {
if collection == t.Name {
exists = true
id = i
break
}
}
if !exists {
col, err := a.api.CreateCollection(collection, nil)
if err != nil {
return utils.WithStack(err)
}
id = col.Response.TimelineId
}
_, err = a.api.AddEntryToCollection(id, tweet.Id, nil)
if err != nil {
return utils.WithStack(err)
}
return nil
} | identifier_body |
twitter.go | package core
import (
"context"
"errors"
"fmt"
"net/url"
"strings"
"github.com/iwataka/anaconda"
"github.com/iwataka/mybot/data"
"github.com/iwataka/mybot/models"
"github.com/iwataka/mybot/oauth"
"github.com/iwataka/mybot/utils"
"github.com/slack-go/slack"
)
// TwitterAPI is a wrapper of anaconda.TwitterApi.
type TwitterAPI struct {
api models.TwitterAPI
config Config
cache data.Cache
self *anaconda.User
}
// NewTwitterAPIWithAuth takes a user's authentication, cache and configuration and
// returns TwitterAPI instance for that user
func | (auth oauth.OAuthCreds, config Config, cache data.Cache) *TwitterAPI {
at, ats := auth.GetCreds()
var api models.TwitterAPI
if len(at) > 0 && len(ats) > 0 {
api = anaconda.NewTwitterApi(at, ats)
}
return NewTwitterAPI(api, config, cache)
}
func NewTwitterAPI(api models.TwitterAPI, config Config, cache data.Cache) *TwitterAPI {
return &TwitterAPI{api, config, cache, nil}
}
func (a *TwitterAPI) BaseAPI() models.TwitterAPI {
return a.api
}
// VerifyCredentials asks the underlying Twitter client whether the configured
// credentials are valid, failing immediately when the API is not enabled.
func (a *TwitterAPI) VerifyCredentials() (bool, error) {
	if !a.Enabled() {
		return false, fmt.Errorf("Twitter API is not available")
	}
	return a.api.VerifyCredentials()
}
func (a *TwitterAPI) PostSlackMsg(text string, atts []slack.Attachment) (anaconda.Tweet, error) {
return a.api.PostTweet(text, nil)
}
// GetSelf gets the authenticated user's information and stores it as a cache,
// then returns it.
func (a *TwitterAPI) GetSelf() (anaconda.User, error) {
if a.self != nil {
return *a.self, nil
}
if a.Enabled() {
self, err := a.api.GetSelf(nil)
if err != nil {
return anaconda.User{}, utils.WithStack(err)
}
a.self = &self
return self, nil
}
return anaconda.User{}, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) Enabled() bool {
return a.api != nil
}
// CheckUser checks whether user matches the authenticated user's screen name
// (when allowSelf is true) or any of the given user names.
func (a *TwitterAPI) CheckUser(user string, allowSelf bool, users []string) (bool, error) {
	if allowSelf {
		self, err := a.GetSelf()
		if err != nil {
			return false, utils.WithStack(err)
		}
		if self.ScreenName == user {
			return true, nil
		}
	}
	for _, candidate := range users {
		if candidate == user {
			return true, nil
		}
	}
	return false, nil
}
// ProcessFavorites gets tweets from the specified user's favorite list and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessFavorites(
name string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
latestID := a.cache.GetLatestFavoriteID(name)
v.Set("screen_name", name)
if latestID > 0 {
v.Set("since_id", fmt.Sprintf("%d", latestID))
} else {
// If the latest favorite ID doesn't exist, this fetches just
// the latest tweet and store that ID.
v.Set("count", "1")
}
tweets, err := a.api.GetFavorites(v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
var pp TwitterPostProcessor
if c.ShouldRepeat() {
pp = &TwitterPostProcessorEach{action, a.cache}
} else {
pp = &TwitterPostProcessorTop{action, name, a.cache}
}
processedTweets, processedActions, err := a.processTweets(tweets, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, nil
}
// ProcessSearch gets tweets from search result by the specified query and does
// the action for tweets filtered by c.
func (a *TwitterAPI) ProcessSearch(
	query string,
	v url.Values,
	c TweetChecker,
	vision VisionMatcher,
	lang LanguageMatcher,
	slack *SlackAPI,
	action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
	res, err := a.GetSearch(query, v)
	if err != nil {
		return nil, nil, utils.WithStack(err)
	}
	// Search results may repeat across calls, so each matching tweet is
	// post-processed individually rather than tracking only the newest ID.
	pp := &TwitterPostProcessorEach{action, a.cache}
	processedTweets, processedActions, err := a.processTweets(res.Statuses, c, vision, lang, slack, action, pp)
	if err != nil {
		return nil, nil, utils.WithStack(err)
	}
	// err is necessarily nil here; return nil explicitly instead of wrapping
	// it, consistent with ProcessFavorites.
	return processedTweets, processedActions, nil
}
type (
TwitterPostProcessor interface {
Process(anaconda.Tweet, bool) error
}
TwitterPostProcessorTop struct {
action data.Action
screenName string
cache data.Cache
}
TwitterPostProcessorEach struct {
action data.Action
cache data.Cache
}
)
func (p *TwitterPostProcessorTop) Process(t anaconda.Tweet, match bool) error {
id := p.cache.GetLatestTweetID(p.screenName)
if t.Id > id {
p.cache.SetLatestTweetID(p.screenName, t.Id)
}
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
// Process records the configured action for t in the cache whenever the tweet
// matched; non-matching tweets are ignored.
func (p *TwitterPostProcessorEach) Process(t anaconda.Tweet, match bool) error {
	if !match {
		return nil
	}
	prev := p.cache.GetTweetAction(t.Id)
	p.cache.SetTweetAction(t.Id, prev.Add(p.action))
	return nil
}
func (a *TwitterAPI) processTweets(
tweets []anaconda.Tweet,
c TweetChecker,
v VisionMatcher,
l LanguageMatcher,
slack *SlackAPI,
action data.Action,
pp TwitterPostProcessor,
) ([]anaconda.Tweet, []data.Action, error) {
processedTweets := []anaconda.Tweet{}
processedActions := []data.Action{}
// From the oldest to the newest
for i := len(tweets) - 1; i >= 0; i-- {
t := tweets[i]
match, err := c.CheckTweet(t, v, l, a.cache)
if err != nil {
return nil, nil, utils.WithStack(err)
}
if match {
done := a.cache.GetTweetAction(t.Id)
undone := action.Sub(done)
err = a.processTweet(t, undone, slack)
if err != nil {
return nil, nil, utils.WithStack(err)
}
processedTweets = append(processedTweets, t)
processedActions = append(processedActions, undone)
}
err = pp.Process(t, match)
if err != nil {
return nil, nil, utils.WithStack(err)
}
}
return processedTweets, processedActions, nil
}
func (a *TwitterAPI) processTweet(
t anaconda.Tweet,
action data.Action,
slack *SlackAPI,
) error {
if action.Twitter.Retweet && !t.Retweeted {
var id int64
if t.RetweetedStatus == nil {
id = t.Id
} else {
id = t.RetweetedStatus.Id
}
_, err := a.api.Retweet(id, false)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if action.Twitter.Favorite && !t.Favorited {
id := t.Id
_, err := a.api.Favorite(id)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
for _, col := range action.Twitter.Collections {
err := a.collectTweet(t, col)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if slack.Enabled() {
for _, ch := range action.Slack.Channels {
err := slack.PostTweet(ch, t)
if CheckSlackError(err) {
return utils.WithStack(err)
}
}
}
return nil
}
// collectTweet adds the given tweet to the named collection owned by the
// authenticated user, creating the collection first when it does not exist.
func (a *TwitterAPI) collectTweet(tweet anaconda.Tweet, collection string) error {
self, err := a.GetSelf()
if err != nil {
return utils.WithStack(err)
}
list, err := a.api.GetCollectionListByUserId(self.Id, nil)
if err != nil {
return utils.WithStack(err)
}
// Look up the target collection by name; the map key is the timeline ID.
exists := false
var id string
for i, t := range list.Objects.Timelines {
if collection == t.Name {
exists = true
id = i
break
}
}
if !exists {
col, err := a.api.CreateCollection(collection, nil)
if err != nil {
return utils.WithStack(err)
}
id = col.Response.TimelineId
}
_, err = a.api.AddEntryToCollection(id, tweet.Id, nil)
if err != nil {
return utils.WithStack(err)
}
return nil
}
func (a *TwitterAPI) GetSearch(query string, url url.Values) (anaconda.SearchResponse, error) {
return a.api.GetSearch(query, url)
}
func (a *TwitterAPI) GetUserSearch(searchTerm string, v url.Values) ([]anaconda.User, error) {
return a.api.GetUserSearch(searchTerm, v)
}
func (a *TwitterAPI) GetFavorites(vals url.Values) ([]anaconda.Tweet, error) {
return a.api.GetFavorites(vals)
}
type TwitterUserListener struct {
stream *anaconda.Stream
api *TwitterAPI
vis VisionMatcher
lang LanguageMatcher
slack *SlackAPI
cache data.Cache
}
// ListenUsers listens timelines of the friends
func (a *TwitterAPI) ListenUsers(
v url.Values,
vis VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
cache data.Cache,
) (*TwitterUserListener, error) {
if v == nil {
v = url.Values{}
}
names := a.config.GetTwitterScreenNames()
usernames := strings.Join(names, ",")
if len(usernames) == 0 {
return nil, errors.New("No user specified")
} else {
users, err := a.api.GetUsersLookup(usernames, nil)
if err != nil {
return nil, utils.WithStack(err)
}
userids := []string{}
for _, u := range users {
userids = append(userids, u.IdStr)
}
v.Set("follow", strings.Join(userids, ","))
stream := a.api.PublicStreamFilter(v)
return &TwitterUserListener{stream, a, vis, lang, slack, cache}, nil
}
}
func (l *TwitterUserListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
for {
select {
case msg := <-l.stream.C:
err := l.processMessage(msg, outChan)
if err != nil {
return utils.WithStack(err)
}
case <-ctx.Done():
return nil
}
}
}
func (l *TwitterUserListener) processMessage(msg interface{}, outChan chan<- interface{}) error {
switch m := msg.(type) {
case anaconda.Tweet:
name := m.User.ScreenName
timelines := l.api.config.GetTwitterTimelinesByScreenName(name)
if len(timelines) != 0 {
outChan <- NewReceivedEvent(TwitterEventType, "tweet", m)
}
for _, timeline := range timelines {
if !checkTweetByTimelineConfig(m, timeline) {
continue
}
match, err := timeline.Filter.CheckTweet(m, l.vis, l.lang, l.cache)
if err != nil {
return utils.WithStack(err)
}
if !match {
continue
}
done := l.api.cache.GetTweetAction(m.Id)
undone := timeline.Action.Sub(done)
if err := l.api.processTweet(m, undone, l.slack); err != nil {
return utils.WithStack(err)
}
outChan <- NewActionEvent(undone, m)
l.api.cache.SetLatestTweetID(name, m.Id)
}
err := l.api.cache.Save()
if err != nil {
return utils.WithStack(err)
}
}
return nil
}
func (l *TwitterUserListener) Stop() {
l.stream.Stop()
}
func checkTweetByTimelineConfig(t anaconda.Tweet, c TimelineConfig) bool {
if c.ExcludeReplies && t.InReplyToScreenName != "" {
return false
}
if !c.IncludeRts && t.RetweetedStatus != nil {
return false
}
return true
}
type TwitterDMListener struct {
stream *anaconda.Stream
api *TwitterAPI
}
// ListenMyself listens to the authenticated user by Twitter's User Streaming
// API and reacts with direct messages.
func (a *TwitterAPI) ListenMyself(v url.Values) (*TwitterDMListener, error) {
ok, err := a.VerifyCredentials()
if err != nil {
return nil, utils.WithStack(err)
} else if !ok {
return nil, errors.New("Twitter Account Verification failed")
}
stream := a.api.UserStream(v)
return &TwitterDMListener{stream, a}, nil
}
func (l *TwitterDMListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
// TODO: Twitter User Stream API has been retired, so I temporarily disable this feature.
// Later I completely remove this feature.
// https://developer.twitter.com/en/docs/twitter-api/enterprise/account-activity-api/migration/us-ss-migration-guide
return nil
// for {
// select {
// case msg := <-l.stream.C:
// switch c := msg.(type) {
// case anaconda.DirectMessage:
// outChan <- NewReceivedEvent(TwitterEventType, "DM", c)
// // TODO: Handle direct messages in the same way as the other sources
// id := l.api.cache.GetLatestDMID()
// if id < c.Id {
// l.api.cache.SetLatestDMID(c.Id)
// }
// err := l.api.cache.Save()
// if err != nil {
// return utils.WithStack(err)
// }
// }
// case <-ctx.Done():
// return nil
// }
// }
}
func (l *TwitterDMListener) Stop() {
l.stream.Stop()
}
// TweetChecker function checks if the specified tweet is acceptable, which means it
// should be retweeted.
type TweetChecker interface {
CheckTweet(t anaconda.Tweet, v VisionMatcher, l LanguageMatcher, c data.Cache) (bool, error)
ShouldRepeat() bool
}
func CheckTwitterError(err error) bool {
if err == nil {
return false
}
switch twitterErr := err.(type) {
case *anaconda.TwitterError:
// https://developer.twitter.com/ja/docs/basics/response-codes
// 130: Over capacity
// 131: Internal error
// 139: You have already favorited this status.
// 187: The status text has already been Tweeted by the authenticated account.
// 327: You have already retweeted this tweet.
switch twitterErr.Code {
case 130, 131, 139, 187, 327:
return false
}
case anaconda.TwitterError:
return CheckTwitterError(&twitterErr)
case *anaconda.ApiError:
code := twitterErr.StatusCode
// Status code 5?? means server error
if code >= 500 && code < 600 {
return false
}
for _, e := range twitterErr.Decoded.Errors {
if CheckTwitterError(e) {
return true
}
}
return false
case anaconda.ApiError:
return CheckTwitterError(&twitterErr)
}
return true
}
func TwitterStatusURL(t anaconda.Tweet) string {
srcFmt := "https://twitter.com/%s/status/%s"
return fmt.Sprintf(srcFmt, t.User.IdStr, t.IdStr)
}
| NewTwitterAPIWithAuth | identifier_name |
twitter.go | package core
import (
"context"
"errors"
"fmt"
"net/url"
"strings"
"github.com/iwataka/anaconda"
"github.com/iwataka/mybot/data"
"github.com/iwataka/mybot/models"
"github.com/iwataka/mybot/oauth"
"github.com/iwataka/mybot/utils"
"github.com/slack-go/slack"
)
// TwitterAPI is a wrapper of anaconda.TwitterApi.
type TwitterAPI struct {
api models.TwitterAPI
config Config
cache data.Cache
self *anaconda.User
}
// NewTwitterAPIWithAuth takes a user's authentication, cache and configuration and
// returns TwitterAPI instance for that user
func NewTwitterAPIWithAuth(auth oauth.OAuthCreds, config Config, cache data.Cache) *TwitterAPI {
at, ats := auth.GetCreds()
var api models.TwitterAPI
if len(at) > 0 && len(ats) > 0 {
api = anaconda.NewTwitterApi(at, ats)
}
return NewTwitterAPI(api, config, cache)
}
func NewTwitterAPI(api models.TwitterAPI, config Config, cache data.Cache) *TwitterAPI {
return &TwitterAPI{api, config, cache, nil}
}
func (a *TwitterAPI) BaseAPI() models.TwitterAPI {
return a.api
}
func (a *TwitterAPI) VerifyCredentials() (bool, error) {
if a.Enabled() {
return a.api.VerifyCredentials()
}
return false, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) PostSlackMsg(text string, atts []slack.Attachment) (anaconda.Tweet, error) {
return a.api.PostTweet(text, nil)
}
// GetSelf gets the authenticated user's information and stores it as a cache,
// then returns it.
func (a *TwitterAPI) GetSelf() (anaconda.User, error) {
if a.self != nil {
return *a.self, nil
}
if a.Enabled() {
self, err := a.api.GetSelf(nil)
if err != nil {
return anaconda.User{}, utils.WithStack(err)
}
a.self = &self
return self, nil
}
return anaconda.User{}, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) Enabled() bool {
return a.api != nil
}
// CheckUser cheks if user is matched for the given allowSelf and users
// arguments.
func (a *TwitterAPI) CheckUser(user string, allowSelf bool, users []string) (bool, error) {
if allowSelf {
self, err := a.GetSelf()
if err != nil {
return false, utils.WithStack(err)
}
if user == self.ScreenName {
return true, nil
}
}
for _, u := range users {
if user == u {
return true, nil
}
}
return false, nil
}
// ProcessFavorites gets tweets from the specified user's favorite list and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessFavorites(
name string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
latestID := a.cache.GetLatestFavoriteID(name)
v.Set("screen_name", name)
if latestID > 0 {
v.Set("since_id", fmt.Sprintf("%d", latestID))
} else {
// If the latest favorite ID doesn't exist, this fetches just
// the latest tweet and store that ID.
v.Set("count", "1")
}
tweets, err := a.api.GetFavorites(v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
var pp TwitterPostProcessor
if c.ShouldRepeat() {
pp = &TwitterPostProcessorEach{action, a.cache}
} else {
pp = &TwitterPostProcessorTop{action, name, a.cache}
}
processedTweets, processedActions, err := a.processTweets(tweets, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, nil
}
// ProcessSearch gets tweets from search result by the specified query and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessSearch(
query string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
res, err := a.GetSearch(query, v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
pp := &TwitterPostProcessorEach{action, a.cache}
processedTweets, processedActions, err := a.processTweets(res.Statuses, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, utils.WithStack(err)
}
type (
TwitterPostProcessor interface {
Process(anaconda.Tweet, bool) error
}
TwitterPostProcessorTop struct {
action data.Action
screenName string
cache data.Cache
}
TwitterPostProcessorEach struct {
action data.Action
cache data.Cache
}
)
func (p *TwitterPostProcessorTop) Process(t anaconda.Tweet, match bool) error {
id := p.cache.GetLatestTweetID(p.screenName)
if t.Id > id {
p.cache.SetLatestTweetID(p.screenName, t.Id)
}
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (p *TwitterPostProcessorEach) Process(t anaconda.Tweet, match bool) error {
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (a *TwitterAPI) processTweets(
tweets []anaconda.Tweet,
c TweetChecker,
v VisionMatcher,
l LanguageMatcher,
slack *SlackAPI,
action data.Action,
pp TwitterPostProcessor,
) ([]anaconda.Tweet, []data.Action, error) {
processedTweets := []anaconda.Tweet{}
processedActions := []data.Action{}
// From the oldest to the newest
for i := len(tweets) - 1; i >= 0; i-- {
t := tweets[i]
match, err := c.CheckTweet(t, v, l, a.cache)
if err != nil {
return nil, nil, utils.WithStack(err)
}
if match {
done := a.cache.GetTweetAction(t.Id)
undone := action.Sub(done)
err = a.processTweet(t, undone, slack)
if err != nil {
return nil, nil, utils.WithStack(err)
}
processedTweets = append(processedTweets, t)
processedActions = append(processedActions, undone)
}
err = pp.Process(t, match)
if err != nil {
return nil, nil, utils.WithStack(err)
}
}
return processedTweets, processedActions, nil
}
func (a *TwitterAPI) processTweet(
t anaconda.Tweet,
action data.Action,
slack *SlackAPI,
) error {
if action.Twitter.Retweet && !t.Retweeted {
var id int64
if t.RetweetedStatus == nil {
id = t.Id
} else {
id = t.RetweetedStatus.Id
}
_, err := a.api.Retweet(id, false)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if action.Twitter.Favorite && !t.Favorited {
id := t.Id
_, err := a.api.Favorite(id)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
for _, col := range action.Twitter.Collections {
err := a.collectTweet(t, col)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if slack.Enabled() {
for _, ch := range action.Slack.Channels {
err := slack.PostTweet(ch, t)
if CheckSlackError(err) {
return utils.WithStack(err)
}
}
}
return nil
}
func (a *TwitterAPI) collectTweet(tweet anaconda.Tweet, collection string) error {
self, err := a.GetSelf()
if err != nil {
return utils.WithStack(err)
}
list, err := a.api.GetCollectionListByUserId(self.Id, nil)
if err != nil {
return utils.WithStack(err)
}
exists := false
var id string
for i, t := range list.Objects.Timelines {
if collection == t.Name {
exists = true
id = i
break
}
}
if !exists {
col, err := a.api.CreateCollection(collection, nil)
if err != nil {
return utils.WithStack(err)
}
id = col.Response.TimelineId
}
_, err = a.api.AddEntryToCollection(id, tweet.Id, nil)
if err != nil {
return utils.WithStack(err)
}
return nil
}
func (a *TwitterAPI) GetSearch(query string, url url.Values) (anaconda.SearchResponse, error) {
return a.api.GetSearch(query, url)
}
func (a *TwitterAPI) GetUserSearch(searchTerm string, v url.Values) ([]anaconda.User, error) {
return a.api.GetUserSearch(searchTerm, v)
}
func (a *TwitterAPI) GetFavorites(vals url.Values) ([]anaconda.Tweet, error) {
return a.api.GetFavorites(vals)
}
type TwitterUserListener struct {
stream *anaconda.Stream
api *TwitterAPI
vis VisionMatcher
lang LanguageMatcher
slack *SlackAPI
cache data.Cache
}
// ListenUsers listens timelines of the friends
func (a *TwitterAPI) ListenUsers(
v url.Values,
vis VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
cache data.Cache,
) (*TwitterUserListener, error) {
if v == nil {
v = url.Values{}
}
names := a.config.GetTwitterScreenNames()
usernames := strings.Join(names, ",")
if len(usernames) == 0 | else {
users, err := a.api.GetUsersLookup(usernames, nil)
if err != nil {
return nil, utils.WithStack(err)
}
userids := []string{}
for _, u := range users {
userids = append(userids, u.IdStr)
}
v.Set("follow", strings.Join(userids, ","))
stream := a.api.PublicStreamFilter(v)
return &TwitterUserListener{stream, a, vis, lang, slack, cache}, nil
}
}
func (l *TwitterUserListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
for {
select {
case msg := <-l.stream.C:
err := l.processMessage(msg, outChan)
if err != nil {
return utils.WithStack(err)
}
case <-ctx.Done():
return nil
}
}
}
func (l *TwitterUserListener) processMessage(msg interface{}, outChan chan<- interface{}) error {
switch m := msg.(type) {
case anaconda.Tweet:
name := m.User.ScreenName
timelines := l.api.config.GetTwitterTimelinesByScreenName(name)
if len(timelines) != 0 {
outChan <- NewReceivedEvent(TwitterEventType, "tweet", m)
}
for _, timeline := range timelines {
if !checkTweetByTimelineConfig(m, timeline) {
continue
}
match, err := timeline.Filter.CheckTweet(m, l.vis, l.lang, l.cache)
if err != nil {
return utils.WithStack(err)
}
if !match {
continue
}
done := l.api.cache.GetTweetAction(m.Id)
undone := timeline.Action.Sub(done)
if err := l.api.processTweet(m, undone, l.slack); err != nil {
return utils.WithStack(err)
}
outChan <- NewActionEvent(undone, m)
l.api.cache.SetLatestTweetID(name, m.Id)
}
err := l.api.cache.Save()
if err != nil {
return utils.WithStack(err)
}
}
return nil
}
func (l *TwitterUserListener) Stop() {
l.stream.Stop()
}
func checkTweetByTimelineConfig(t anaconda.Tweet, c TimelineConfig) bool {
if c.ExcludeReplies && t.InReplyToScreenName != "" {
return false
}
if !c.IncludeRts && t.RetweetedStatus != nil {
return false
}
return true
}
type TwitterDMListener struct {
stream *anaconda.Stream
api *TwitterAPI
}
// ListenMyself listens to the authenticated user by Twitter's User Streaming
// API and reacts with direct messages.
func (a *TwitterAPI) ListenMyself(v url.Values) (*TwitterDMListener, error) {
ok, err := a.VerifyCredentials()
if err != nil {
return nil, utils.WithStack(err)
} else if !ok {
return nil, errors.New("Twitter Account Verification failed")
}
stream := a.api.UserStream(v)
return &TwitterDMListener{stream, a}, nil
}
func (l *TwitterDMListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
// TODO: Twitter User Stream API has been retired, so I temporarily disable this feature.
// Later I completely remove this feature.
// https://developer.twitter.com/en/docs/twitter-api/enterprise/account-activity-api/migration/us-ss-migration-guide
return nil
// for {
// select {
// case msg := <-l.stream.C:
// switch c := msg.(type) {
// case anaconda.DirectMessage:
// outChan <- NewReceivedEvent(TwitterEventType, "DM", c)
// // TODO: Handle direct messages in the same way as the other sources
// id := l.api.cache.GetLatestDMID()
// if id < c.Id {
// l.api.cache.SetLatestDMID(c.Id)
// }
// err := l.api.cache.Save()
// if err != nil {
// return utils.WithStack(err)
// }
// }
// case <-ctx.Done():
// return nil
// }
// }
}
func (l *TwitterDMListener) Stop() {
l.stream.Stop()
}
// TweetChecker function checks if the specified tweet is acceptable, which means it
// should be retweeted.
type TweetChecker interface {
CheckTweet(t anaconda.Tweet, v VisionMatcher, l LanguageMatcher, c data.Cache) (bool, error)
ShouldRepeat() bool
}
func CheckTwitterError(err error) bool {
if err == nil {
return false
}
switch twitterErr := err.(type) {
case *anaconda.TwitterError:
// https://developer.twitter.com/ja/docs/basics/response-codes
// 130: Over capacity
// 131: Internal error
// 139: You have already favorited this status.
// 187: The status text has already been Tweeted by the authenticated account.
// 327: You have already retweeted this tweet.
switch twitterErr.Code {
case 130, 131, 139, 187, 327:
return false
}
case anaconda.TwitterError:
return CheckTwitterError(&twitterErr)
case *anaconda.ApiError:
code := twitterErr.StatusCode
// Status code 5?? means server error
if code >= 500 && code < 600 {
return false
}
for _, e := range twitterErr.Decoded.Errors {
if CheckTwitterError(e) {
return true
}
}
return false
case anaconda.ApiError:
return CheckTwitterError(&twitterErr)
}
return true
}
func TwitterStatusURL(t anaconda.Tweet) string {
srcFmt := "https://twitter.com/%s/status/%s"
return fmt.Sprintf(srcFmt, t.User.IdStr, t.IdStr)
}
| {
return nil, errors.New("No user specified")
} | conditional_block |
twitter.go | package core
import (
"context"
"errors"
"fmt"
"net/url"
"strings"
"github.com/iwataka/anaconda"
"github.com/iwataka/mybot/data"
"github.com/iwataka/mybot/models"
"github.com/iwataka/mybot/oauth"
"github.com/iwataka/mybot/utils"
"github.com/slack-go/slack"
)
// TwitterAPI is a wrapper of anaconda.TwitterApi.
type TwitterAPI struct {
api models.TwitterAPI
config Config
cache data.Cache
self *anaconda.User
}
// NewTwitterAPIWithAuth takes a user's authentication, cache and configuration and
// returns TwitterAPI instance for that user
func NewTwitterAPIWithAuth(auth oauth.OAuthCreds, config Config, cache data.Cache) *TwitterAPI {
at, ats := auth.GetCreds()
var api models.TwitterAPI
if len(at) > 0 && len(ats) > 0 {
api = anaconda.NewTwitterApi(at, ats)
}
return NewTwitterAPI(api, config, cache)
}
func NewTwitterAPI(api models.TwitterAPI, config Config, cache data.Cache) *TwitterAPI {
return &TwitterAPI{api, config, cache, nil}
}
func (a *TwitterAPI) BaseAPI() models.TwitterAPI {
return a.api
}
func (a *TwitterAPI) VerifyCredentials() (bool, error) {
if a.Enabled() {
return a.api.VerifyCredentials()
}
return false, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) PostSlackMsg(text string, atts []slack.Attachment) (anaconda.Tweet, error) {
return a.api.PostTweet(text, nil)
}
// GetSelf gets the authenticated user's information and stores it as a cache,
// then returns it.
func (a *TwitterAPI) GetSelf() (anaconda.User, error) {
if a.self != nil {
return *a.self, nil
}
if a.Enabled() {
self, err := a.api.GetSelf(nil)
if err != nil {
return anaconda.User{}, utils.WithStack(err)
}
a.self = &self
return self, nil
}
return anaconda.User{}, fmt.Errorf("Twitter API is not available")
}
func (a *TwitterAPI) Enabled() bool {
return a.api != nil
}
// CheckUser cheks if user is matched for the given allowSelf and users
// arguments.
func (a *TwitterAPI) CheckUser(user string, allowSelf bool, users []string) (bool, error) {
if allowSelf {
self, err := a.GetSelf()
if err != nil {
return false, utils.WithStack(err)
}
if user == self.ScreenName {
return true, nil
}
}
for _, u := range users {
if user == u {
return true, nil
}
}
return false, nil
}
// ProcessFavorites gets tweets from the specified user's favorite list and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessFavorites(
name string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
latestID := a.cache.GetLatestFavoriteID(name)
v.Set("screen_name", name)
if latestID > 0 {
v.Set("since_id", fmt.Sprintf("%d", latestID))
} else {
// If the latest favorite ID doesn't exist, this fetches just
// the latest tweet and store that ID.
v.Set("count", "1")
}
tweets, err := a.api.GetFavorites(v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
var pp TwitterPostProcessor
if c.ShouldRepeat() {
pp = &TwitterPostProcessorEach{action, a.cache}
} else {
pp = &TwitterPostProcessorTop{action, name, a.cache}
}
processedTweets, processedActions, err := a.processTweets(tweets, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, nil
}
// ProcessSearch gets tweets from search result by the specified query and do
// action for tweets filtered by c.
func (a *TwitterAPI) ProcessSearch(
query string,
v url.Values,
c TweetChecker,
vision VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
action data.Action,
) ([]anaconda.Tweet, []data.Action, error) {
res, err := a.GetSearch(query, v)
if err != nil {
return nil, nil, utils.WithStack(err)
}
pp := &TwitterPostProcessorEach{action, a.cache}
processedTweets, processedActions, err := a.processTweets(res.Statuses, c, vision, lang, slack, action, pp)
if err != nil {
return nil, nil, utils.WithStack(err)
}
return processedTweets, processedActions, utils.WithStack(err)
}
type (
TwitterPostProcessor interface {
Process(anaconda.Tweet, bool) error
}
TwitterPostProcessorTop struct {
action data.Action
screenName string
cache data.Cache
}
TwitterPostProcessorEach struct {
action data.Action
cache data.Cache
}
)
func (p *TwitterPostProcessorTop) Process(t anaconda.Tweet, match bool) error {
id := p.cache.GetLatestTweetID(p.screenName)
if t.Id > id {
p.cache.SetLatestTweetID(p.screenName, t.Id)
}
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (p *TwitterPostProcessorEach) Process(t anaconda.Tweet, match bool) error {
if match {
ac := p.cache.GetTweetAction(t.Id)
p.cache.SetTweetAction(t.Id, ac.Add(p.action))
}
return nil
}
func (a *TwitterAPI) processTweets(
tweets []anaconda.Tweet,
c TweetChecker,
v VisionMatcher,
l LanguageMatcher,
slack *SlackAPI,
action data.Action,
pp TwitterPostProcessor,
) ([]anaconda.Tweet, []data.Action, error) {
processedTweets := []anaconda.Tweet{}
processedActions := []data.Action{}
// From the oldest to the newest
for i := len(tweets) - 1; i >= 0; i-- {
t := tweets[i]
match, err := c.CheckTweet(t, v, l, a.cache)
if err != nil {
return nil, nil, utils.WithStack(err)
}
if match {
done := a.cache.GetTweetAction(t.Id)
undone := action.Sub(done)
err = a.processTweet(t, undone, slack)
if err != nil {
return nil, nil, utils.WithStack(err)
}
processedTweets = append(processedTweets, t)
processedActions = append(processedActions, undone)
}
err = pp.Process(t, match) | if err != nil {
return nil, nil, utils.WithStack(err)
}
}
return processedTweets, processedActions, nil
}
func (a *TwitterAPI) processTweet(
t anaconda.Tweet,
action data.Action,
slack *SlackAPI,
) error {
if action.Twitter.Retweet && !t.Retweeted {
var id int64
if t.RetweetedStatus == nil {
id = t.Id
} else {
id = t.RetweetedStatus.Id
}
_, err := a.api.Retweet(id, false)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if action.Twitter.Favorite && !t.Favorited {
id := t.Id
_, err := a.api.Favorite(id)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
for _, col := range action.Twitter.Collections {
err := a.collectTweet(t, col)
if CheckTwitterError(err) {
return utils.WithStack(err)
}
}
if slack.Enabled() {
for _, ch := range action.Slack.Channels {
err := slack.PostTweet(ch, t)
if CheckSlackError(err) {
return utils.WithStack(err)
}
}
}
return nil
}
func (a *TwitterAPI) collectTweet(tweet anaconda.Tweet, collection string) error {
self, err := a.GetSelf()
if err != nil {
return utils.WithStack(err)
}
list, err := a.api.GetCollectionListByUserId(self.Id, nil)
if err != nil {
return utils.WithStack(err)
}
exists := false
var id string
for i, t := range list.Objects.Timelines {
if collection == t.Name {
exists = true
id = i
break
}
}
if !exists {
col, err := a.api.CreateCollection(collection, nil)
if err != nil {
return utils.WithStack(err)
}
id = col.Response.TimelineId
}
_, err = a.api.AddEntryToCollection(id, tweet.Id, nil)
if err != nil {
return utils.WithStack(err)
}
return nil
}
func (a *TwitterAPI) GetSearch(query string, url url.Values) (anaconda.SearchResponse, error) {
return a.api.GetSearch(query, url)
}
func (a *TwitterAPI) GetUserSearch(searchTerm string, v url.Values) ([]anaconda.User, error) {
return a.api.GetUserSearch(searchTerm, v)
}
func (a *TwitterAPI) GetFavorites(vals url.Values) ([]anaconda.Tweet, error) {
return a.api.GetFavorites(vals)
}
type TwitterUserListener struct {
stream *anaconda.Stream
api *TwitterAPI
vis VisionMatcher
lang LanguageMatcher
slack *SlackAPI
cache data.Cache
}
// ListenUsers listens timelines of the friends
func (a *TwitterAPI) ListenUsers(
v url.Values,
vis VisionMatcher,
lang LanguageMatcher,
slack *SlackAPI,
cache data.Cache,
) (*TwitterUserListener, error) {
if v == nil {
v = url.Values{}
}
names := a.config.GetTwitterScreenNames()
usernames := strings.Join(names, ",")
if len(usernames) == 0 {
return nil, errors.New("No user specified")
} else {
users, err := a.api.GetUsersLookup(usernames, nil)
if err != nil {
return nil, utils.WithStack(err)
}
userids := []string{}
for _, u := range users {
userids = append(userids, u.IdStr)
}
v.Set("follow", strings.Join(userids, ","))
stream := a.api.PublicStreamFilter(v)
return &TwitterUserListener{stream, a, vis, lang, slack, cache}, nil
}
}
func (l *TwitterUserListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
for {
select {
case msg := <-l.stream.C:
err := l.processMessage(msg, outChan)
if err != nil {
return utils.WithStack(err)
}
case <-ctx.Done():
return nil
}
}
}
func (l *TwitterUserListener) processMessage(msg interface{}, outChan chan<- interface{}) error {
switch m := msg.(type) {
case anaconda.Tweet:
name := m.User.ScreenName
timelines := l.api.config.GetTwitterTimelinesByScreenName(name)
if len(timelines) != 0 {
outChan <- NewReceivedEvent(TwitterEventType, "tweet", m)
}
for _, timeline := range timelines {
if !checkTweetByTimelineConfig(m, timeline) {
continue
}
match, err := timeline.Filter.CheckTweet(m, l.vis, l.lang, l.cache)
if err != nil {
return utils.WithStack(err)
}
if !match {
continue
}
done := l.api.cache.GetTweetAction(m.Id)
undone := timeline.Action.Sub(done)
if err := l.api.processTweet(m, undone, l.slack); err != nil {
return utils.WithStack(err)
}
outChan <- NewActionEvent(undone, m)
l.api.cache.SetLatestTweetID(name, m.Id)
}
err := l.api.cache.Save()
if err != nil {
return utils.WithStack(err)
}
}
return nil
}
func (l *TwitterUserListener) Stop() {
l.stream.Stop()
}
func checkTweetByTimelineConfig(t anaconda.Tweet, c TimelineConfig) bool {
if c.ExcludeReplies && t.InReplyToScreenName != "" {
return false
}
if !c.IncludeRts && t.RetweetedStatus != nil {
return false
}
return true
}
type TwitterDMListener struct {
stream *anaconda.Stream
api *TwitterAPI
}
// ListenMyself listens to the authenticated user by Twitter's User Streaming
// API and reacts with direct messages.
func (a *TwitterAPI) ListenMyself(v url.Values) (*TwitterDMListener, error) {
ok, err := a.VerifyCredentials()
if err != nil {
return nil, utils.WithStack(err)
} else if !ok {
return nil, errors.New("Twitter Account Verification failed")
}
stream := a.api.UserStream(v)
return &TwitterDMListener{stream, a}, nil
}
func (l *TwitterDMListener) Listen(ctx context.Context, outChan chan<- interface{}) error {
// TODO: Twitter User Stream API has been retired, so I temporarily disable this feature.
// Later I completely remove this feature.
// https://developer.twitter.com/en/docs/twitter-api/enterprise/account-activity-api/migration/us-ss-migration-guide
return nil
// for {
// select {
// case msg := <-l.stream.C:
// switch c := msg.(type) {
// case anaconda.DirectMessage:
// outChan <- NewReceivedEvent(TwitterEventType, "DM", c)
// // TODO: Handle direct messages in the same way as the other sources
// id := l.api.cache.GetLatestDMID()
// if id < c.Id {
// l.api.cache.SetLatestDMID(c.Id)
// }
// err := l.api.cache.Save()
// if err != nil {
// return utils.WithStack(err)
// }
// }
// case <-ctx.Done():
// return nil
// }
// }
}
func (l *TwitterDMListener) Stop() {
l.stream.Stop()
}
// TweetChecker function checks if the specified tweet is acceptable, which means it
// should be retweeted.
type TweetChecker interface {
CheckTweet(t anaconda.Tweet, v VisionMatcher, l LanguageMatcher, c data.Cache) (bool, error)
ShouldRepeat() bool
}
func CheckTwitterError(err error) bool {
if err == nil {
return false
}
switch twitterErr := err.(type) {
case *anaconda.TwitterError:
// https://developer.twitter.com/ja/docs/basics/response-codes
// 130: Over capacity
// 131: Internal error
// 139: You have already favorited this status.
// 187: The status text has already been Tweeted by the authenticated account.
// 327: You have already retweeted this tweet.
switch twitterErr.Code {
case 130, 131, 139, 187, 327:
return false
}
case anaconda.TwitterError:
return CheckTwitterError(&twitterErr)
case *anaconda.ApiError:
code := twitterErr.StatusCode
// Status code 5?? means server error
if code >= 500 && code < 600 {
return false
}
for _, e := range twitterErr.Decoded.Errors {
if CheckTwitterError(e) {
return true
}
}
return false
case anaconda.ApiError:
return CheckTwitterError(&twitterErr)
}
return true
}
func TwitterStatusURL(t anaconda.Tweet) string {
srcFmt := "https://twitter.com/%s/status/%s"
return fmt.Sprintf(srcFmt, t.User.IdStr, t.IdStr)
} | random_line_split | |
wavelet_tree_pointer.rs | use bio::data_structures::rank_select::RankSelect;
use bv::BitVec;
use bv::BitsMut;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, Snafu};
use std::fmt::Debug;
use std::hash::Hash;
use std::ops::Index;
///custom errors for the tree with pointer
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display(
"Es gibt kein 0tes Element, das erste Element wird mit access(1) angesprochen"
))]
Access0,
#[snafu(display("Eingabe darf bei select nicht kleiner als 1 sein"))]
SelectSmaller0,
#[snafu(display("Fehler bei root unwrap in access"))]
RootUnwrapError,
#[snafu(display("Index ist größer als die Länge der Sequence"))]
IndexOutOfBound,
#[snafu(display("Element nicht gefunden"))]
NoSuchElement,
#[snafu(display("Element nicht im Alphabet, Fehler bei select"))]
NotInAlphabet,
#[snafu(display("Das Symbol kommt nicht oft genug im Wort vor"))]
NotEnoughElements,
#[snafu(display("PlatzhalterError"))]
TempError,
}
///representation of the WaveletTree
#[derive(Serialize, Deserialize)]
pub struct WaveletTree<T> {
//The alphabet of the sequence the tree is build from
alphabet: Vec<T>,
//the first node that holds a bitmap over the entire sequence
root: Option<Box<BinNode>>,
}
///representation of the nodes in the tree,
///they are managed by the tree and the user has no direct access
#[derive(Serialize, Deserialize)]
struct BinNode {
///The bitmap stored in the node
value: RankSelect,
///The left Child of the node
left: Option<Box<BinNode>>,
///The right child of the node
right: Option<Box<BinNode>>,
}
///The Iterator for WaveletTrees
pub struct Iterhelper<'de, T> {
position: usize,
tree: &'de WaveletTree<T>,
}
impl<'de, T> WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
/// creates a WaveletTree out of a given sequence
/// * `sequence` - the sequence that is representet in the tree
pub fn create<S: Clone + Iterator<Item = T>>(sequence: S) -> WaveletTree<T> {
let mut sequence = sequence.peekable();
if sequence.peek().is_none() {
panic!("Die übergebene Sequence ist leer!")
};
let seqvec = sequence.clone().collect::<Vec<_>>();
let mut alphabet: Vec<T> = Vec::new();
alphabet.extend(sequence.unique());
alphabet.sort();
let alphslice = &alphabet[..];
WaveletTree {
root: Some(Box::new(BinNode::create_node(alphslice, seqvec))),
alphabet: alphabet,
}
}
///Returns the element at index, or an error if something goes wrong.
///To make the use of this funktion more intuitiv index starts at 1, so if you want the xth element you can call access(x)
pub fn access(&self, index: usize) -> Result<T, Error> {
ensure!(index > 0, Access0);
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
ensure!(z.len() >= index as u64, IndexOutOfBound);
let z = match &self.root {
Some(x) => x.access((index - 1) as u64, 0, self.alphabet.len() - 1),
None => return Err(Error::RootUnwrapError),
};
match z {
Some(x) => Ok(self.alphabet[x]),
None => return Err(Error::NoSuchElement),
}
}
fn access_ref(&self, index: usize) -> &T {
let result = match self.access(index) {
Ok(x) => x,
Err(_) => panic!("Index out of Bounds"),
};
for i in 0..self.alphabet.len() {
if self.alphabet[i] == result {
return &self.alphabet[i];
}
}
panic!("Index in Bounds but not found");
}
///Returns the the position of the index'th occurence of the character
pub fn select(&self, character: T, index: usize) -> Result<u64, Error> {
// Abfangen von fehlerhafter Eingabe, Index darf hier nicht 0 sein
ensure!(index > 0, SelectSmaller0);
//------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index steht das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Err(Error::NotInAlphabet),
};
//Abfangen dass der Buchstabe nicht index oft vorkommt
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
if &self.rank(character, z.len() as usize).unwrap() < &(index as u64) {
return Err(Error::NotEnoughElements);
}
let result = match &self.root {
Some(x) => x.select(index as u64, character_index, 0, self.alphabet.len() - 1),
None => return Err(Error::TempError), //Err("Fehler"),
};
match result {
Some(x) => return Ok(x + 1),
None => return Err(Error::TempError),
}
}
/// Returns the number of occurences of the character in the Intervall [1..index].
pub fn rank(&self, character: T, index: usize) -> Result<u64, Error> {
if index < 1 {
return Ok(0);
}
let index = index - 1;
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
ensure!(z.len() > index as u64, IndexOutOfBound);
//---------------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Ok(0), //element nicht in alphabet => gib 0 zurück
};
let result = match &self.root {
Some(x) => (*x).rank(index as u64, character_index, 0, &self.alphabet.len() - 1),
None => return Err(Error::NoSuchElement),
};
match result {
Some(x) => return Ok(x),
None => return Err(Error::NoSuchElement),
}
}
/// Returns a Vector that holds the sequence, this does not consume the tree
pub fn rebuild(&'de self) -> Vec<T> {
let mut result: Vec<T> = Vec::new();
for x in self.into_iter() {
result.push(x);
}
result
}
///Returns the length of the sequence or an error if the root is missing
pub fn len(&self) -> Result<u64, Error> {
let root = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
Ok(root.len())
}
///Returns the lenght of the alphabet
pub fn alphabet_len(&self) -> usize {
self.alphabet.len()
}
}
///Implements the Index Trait to allow access with [index], since it uses the access function index starts at 1
impl<'de, T> Index<usize> for WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
&self.access_ref(index)
}
}
impl BinNode {
fn create_no | + Clone + Ord + Debug>(alphabet: &[E], sequence: Vec<E>) -> BinNode {
let count = sequence.len();
if alphabet.len() <= 1 {
let value = BitVec::new_fill(true, count as u64);
BinNode {
value: RankSelect::new(value, 1),
left: None,
right: None,
}
} else {
let mut value = BitVec::new_fill(false, count as u64);
let mid = (alphabet.len() + 1) / 2;
//Das Alphabet wird geteilt, die 2. Hälfte wird in alphabet2 gespeichert
let (alphabet1, alphabet2) = alphabet.split_at(mid);
//Die Sequenzen für den nächsten Schritt
let mut sequence1 = Vec::new();
let mut sequence2 = Vec::new();
//Es werden alle Elemente der Sequenz durchegangen
for x in 0..(sequence.len()) {
//wenn sie in der 2. Hälfte des Alphabets sind wird ihr Eintrag in der Bitmap auf 1 gesetzt
if alphabet2.contains(&sequence[x]) {
value.set_bit(x as u64, true)
}
}
//Group_by teilt in Gruppen key ist true wenn Zeichen in alphabet1, sonst false
for (key, group) in &sequence
.into_iter()
.group_by(|elem| alphabet1.contains(&elem))
{
//neue Sequencen werden anhand der Keys gebaut
if key {
sequence1.extend(group)
} else {
sequence2.extend(group)
}
}
BinNode {
value: RankSelect::new(value, 1),
left: Some(Box::new(BinNode::create_node(alphabet1, sequence1))),
right: Some(Box::new(BinNode::create_node(alphabet2, sequence2))),
}
}
}
fn access(&self, index: u64, min: usize, max: usize) -> Option<usize> {
if min == max {
return Some(min);
} else {
if self.value.get((index) as u64) {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => return (*x).access(next_index - 1, 1 + (min + max) / 2, max),
None => return None,
}
} else {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).access(next_index - 1, min, (min + max) / 2),
None => return None,
}
}
}
}
fn select(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<(u64)> {
//Blatt erreicht
if min == max {
return Some(index - 1);
}
// Position wird in Index umgerechnet, da Eingabe mit Position erfolgt
else {
if character <= &((max + min) / 2) {
let result = match &self.left {
Some(x) => (*x).select(index, character, min, (min + max) / 2),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_0(new_index + 1); //+1 da Index in Position umgerechnet wird
} else {
let result = match &self.right {
Some(x) => (*x).select(index, character, (min + max) / 2 + 1, max),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_1(new_index + 1); //+1 da Index in Position umgerechnet wird
}
}
}
fn rank(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<u64> {
if min == max {
return Some(index + 1);
}
//Wenn nicht im blatt
else {
if character <= &((max + min) / 2) {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).rank(next_index - 1, character, min, (min + max) / 2),
None => return None,
}
} else {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => {
return (*x).rank(next_index - 1, character, ((min + max) / 2) + 1, max);
}
None => return None,
}
}
}
}
fn len(&self) -> u64 {
self.value.bits().len()
}
}
///Implements a non-consuming Iterator for the WaveletTree
impl<'de, T> IntoIterator for &'de WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Item = T;
type IntoIter = Iterhelper<'de, T>;
fn into_iter(self) -> Self::IntoIter {
Iterhelper {
position: 0,
tree: self,
}
}
}
impl<'de, T> Iterator for Iterhelper<'de, T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.position += 1;
let len = match self.tree.len() {
Ok(x) => x,
Err(_) => return None,
};
if self.position <= len as usize {
match self.tree.access(self.position) {
Ok(x) => return Some(x),
Err(_) => return None,
};
} else {
None
}
}
}
| de<E: Hash | identifier_name |
wavelet_tree_pointer.rs | use bio::data_structures::rank_select::RankSelect;
use bv::BitVec;
use bv::BitsMut;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, Snafu};
use std::fmt::Debug;
use std::hash::Hash;
use std::ops::Index;
///custom errors for the tree with pointer
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display(
"Es gibt kein 0tes Element, das erste Element wird mit access(1) angesprochen"
))]
Access0,
#[snafu(display("Eingabe darf bei select nicht kleiner als 1 sein"))]
SelectSmaller0,
#[snafu(display("Fehler bei root unwrap in access"))]
RootUnwrapError,
#[snafu(display("Index ist größer als die Länge der Sequence"))]
IndexOutOfBound,
#[snafu(display("Element nicht gefunden"))]
NoSuchElement,
#[snafu(display("Element nicht im Alphabet, Fehler bei select"))]
NotInAlphabet,
#[snafu(display("Das Symbol kommt nicht oft genug im Wort vor"))]
NotEnoughElements,
#[snafu(display("PlatzhalterError"))]
TempError,
}
///representation of the WaveletTree
#[derive(Serialize, Deserialize)]
pub struct WaveletTree<T> {
//The alphabet of the sequence the tree is build from
alphabet: Vec<T>,
//the first node that holds a bitmap over the entire sequence
root: Option<Box<BinNode>>,
}
///representation of the nodes in the tree,
///they are managed by the tree and the user has no direct access
#[derive(Serialize, Deserialize)]
struct BinNode {
///The bitmap stored in the node
value: RankSelect,
///The left Child of the node
left: Option<Box<BinNode>>,
///The right child of the node
right: Option<Box<BinNode>>,
}
///The Iterator for WaveletTrees
pub struct Iterhelper<'de, T> {
position: usize,
tree: &'de WaveletTree<T>,
}
impl<'de, T> WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
/// creates a WaveletTree out of a given sequence
/// * `sequence` - the sequence that is representet in the tree
pub fn create<S: Clone + Iterator<Item = T>>(sequence: S) -> WaveletTree<T> {
let mut sequence = sequence.peekable();
if sequence.peek().is_none() {
panic!("Die übergebene Sequence ist leer!")
};
let seqvec = sequence.clone().collect::<Vec<_>>();
let mut alphabet: Vec<T> = Vec::new();
alphabet.extend(sequence.unique());
alphabet.sort();
let alphslice = &alphabet[..];
WaveletTree {
root: Some(Box::new(BinNode::create_node(alphslice, seqvec))),
alphabet: alphabet,
}
}
///Returns the element at index, or an error if something goes wrong.
///To make the use of this funktion more intuitiv index starts at 1, so if you want the xth element you can call access(x)
pub fn access(&self, index: usize) -> Result<T, Error> {
ensure!(index > 0, Access0);
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
ensure!(z.len() >= index as u64, IndexOutOfBound);
let z = match &self.root {
Some(x) => x.access((index - 1) as u64, 0, self.alphabet.len() - 1),
None => return Err(Error::RootUnwrapError),
};
match z {
Some(x) => Ok(self.alphabet[x]),
None => return Err(Error::NoSuchElement),
}
}
fn access_ref(&self, index: usize) -> &T {
let result = match self.access(index) {
Ok(x) => x,
Err(_) => panic!("Index out of Bounds"),
};
for i in 0..self.alphabet.len() {
if self.alphabet[i] == result {
return &self.alphabet[i];
}
}
panic!("Index in Bounds but not found");
}
///Returns the the position of the index'th occurence of the character
pub fn select(&self, character: T, index: usize) -> Result<u64, Error> {
// Abfangen von fehlerhafter Eingabe, Index darf hier nicht 0 sein
ensure!(index > 0, SelectSmaller0);
//------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index steht das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Err(Error::NotInAlphabet),
};
//Abfangen dass der Buchstabe nicht index oft vorkommt
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
if &self.rank(character, z.len() as usize).unwrap() < &(index as u64) {
return Err(Error::NotEnoughElements);
}
let result = match &self.root {
Some(x) => x.select(index as u64, character_index, 0, self.alphabet.len() - 1),
None => return Err(Error::TempError), //Err("Fehler"),
};
match result {
Some(x) => return Ok(x + 1),
None => return Err(Error::TempError),
}
}
/// Returns the number of occurences of the character in the Intervall [1..index].
pub fn rank(&self, character: T, index: usize) -> Result<u64, Error> {
if index < 1 {
return Ok(0);
}
let index = index - 1;
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
ensure!(z.len() > index as u64, IndexOutOfBound);
//---------------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Ok(0), //element nicht in alphabet => gib 0 zurück
};
let result = match &self.root {
Some(x) => (*x).rank(index as u64, character_index, 0, &self.alphabet.len() - 1),
None => return Err(Error::NoSuchElement),
};
match result {
Some(x) => return Ok(x),
None => return Err(Error::NoSuchElement),
}
}
/// Returns a Vector that holds the sequence, this does not consume the tree
pub fn rebuild(&'de self) -> Vec<T> {
let mut result: Vec<T> = Vec::new();
for x in self.into_iter() {
result.push(x);
}
result
}
///Returns the length of the sequence or an error if the root is missing
pub fn len(&self) -> Result<u64, Error> {
let root = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
Ok(root.len())
}
///Returns the lenght of the alphabet
pub fn alphabet_len(&self) -> usize {
self.alphabet.len()
}
}
///Implements the Index Trait to allow access with [index], since it uses the access function index starts at 1
impl<'de, T> Index<usize> for WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
&self.access_ref(index)
}
}
impl BinNode {
fn create_node<E: Hash + Clone + Ord + Debug>(alphabet: &[E], sequence: Vec<E>) -> BinNode {
let count = sequence.len();
if alphabet.len() <= 1 {
let value = BitVec::new_fill(true, count as u64);
BinNode {
value: RankSelect::new(value, 1),
left: None,
right: None,
}
} else {
let mut value = BitVec::new_fill(false, count as u64);
let mid = (alphabet.len() + 1) / 2;
//Das Alphabet wird geteilt, die 2. Hälfte wird in alphabet2 gespeichert
let (alphabet1, alphabet2) = alphabet.split_at(mid);
//Die Sequenzen für den nächsten Schritt
let mut sequence1 = Vec::new();
let mut sequence2 = Vec::new();
//Es werden alle Elemente der Sequenz durchegangen
for x in 0..(sequence.len()) {
//wenn sie in der 2. Hälfte des Alphabets sind wird ihr Eintrag in der Bitmap auf 1 gesetzt
if alphabet2.contains(&sequence[x]) {
value.set_bit(x as u64, true)
}
}
//Group_by teilt in Gruppen key ist true wenn Zeichen in alphabet1, sonst false
for (key, group) in &sequence
.into_iter()
.group_by(|elem| alphabet1.contains(&elem))
{
//neue Sequencen werden anhand der Keys gebaut
if key {
sequence1.extend(group)
} else {
sequence2.extend(group)
}
}
BinNode {
value: RankSelect::new(value, 1),
left: Some(Box::new(BinNode::create_node(alphabet1, sequence1))),
right: Some(Box::new(BinNode::create_node(alphabet2, sequence2))),
}
}
}
fn access(&self, index: u64, min: usize, max: usize) -> Option<usize> {
if min == max {
return Some(min);
} else {
if self.value.get((index) as u64) {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => return (*x).access(next_index - 1, 1 + (min + max) / 2, max),
None => return None,
}
} else {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).access(next_index - 1, min, (min + max) / 2),
None => return None,
}
}
}
}
fn select(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<(u64)> {
//Blatt erreicht
if min == max {
return Some(index - 1);
}
// Position wird in Index umgerechnet, da Eingabe mit Position erfolgt
else {
if character <= &((max + min) / 2) {
let result = match &self.left {
Some(x) => (*x).select(index, character, min, (min + max) / 2),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_0(new_index + 1); //+1 da Index in Position umgerechnet wird
} else {
let result = match &self.right {
Some(x) => (*x).select(index, character, (min + max) / 2 + 1, max),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_1(new_index + 1); //+1 da Index in Position umgerechnet wird
}
}
}
fn rank(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<u64> {
if min == max {
return Some(index + 1);
}
//Wenn nicht im blatt
else {
if character <= &((max + min) / 2) {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).rank(next_index - 1, character, min, (min + max) / 2),
None => return None,
}
} else {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => {
return (*x).rank(next_index - 1, character, ((min + max) / 2) + 1, max);
}
None => return None,
}
}
}
}
fn len(&self) -> u64 {
sel | nts a non-consuming Iterator for the WaveletTree
impl<'de, T> IntoIterator for &'de WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Item = T;
type IntoIter = Iterhelper<'de, T>;
fn into_iter(self) -> Self::IntoIter {
Iterhelper {
position: 0,
tree: self,
}
}
}
impl<'de, T> Iterator for Iterhelper<'de, T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.position += 1;
let len = match self.tree.len() {
Ok(x) => x,
Err(_) => return None,
};
if self.position <= len as usize {
match self.tree.access(self.position) {
Ok(x) => return Some(x),
Err(_) => return None,
};
} else {
None
}
}
}
| f.value.bits().len()
}
}
///Impleme | identifier_body |
wavelet_tree_pointer.rs | use bio::data_structures::rank_select::RankSelect;
use bv::BitVec;
use bv::BitsMut;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, Snafu};
use std::fmt::Debug;
use std::hash::Hash;
use std::ops::Index;
///custom errors for the tree with pointer
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display(
"Es gibt kein 0tes Element, das erste Element wird mit access(1) angesprochen"
))]
Access0,
#[snafu(display("Eingabe darf bei select nicht kleiner als 1 sein"))]
SelectSmaller0,
#[snafu(display("Fehler bei root unwrap in access"))]
RootUnwrapError,
#[snafu(display("Index ist größer als die Länge der Sequence"))]
IndexOutOfBound,
#[snafu(display("Element nicht gefunden"))]
NoSuchElement,
#[snafu(display("Element nicht im Alphabet, Fehler bei select"))]
NotInAlphabet,
#[snafu(display("Das Symbol kommt nicht oft genug im Wort vor"))]
NotEnoughElements,
#[snafu(display("PlatzhalterError"))]
TempError,
}
///representation of the WaveletTree
#[derive(Serialize, Deserialize)]
pub struct WaveletTree<T> {
//The alphabet of the sequence the tree is build from
alphabet: Vec<T>,
//the first node that holds a bitmap over the entire sequence
root: Option<Box<BinNode>>,
}
///representation of the nodes in the tree,
///they are managed by the tree and the user has no direct access
#[derive(Serialize, Deserialize)]
struct BinNode {
///The bitmap stored in the node
value: RankSelect,
///The left Child of the node
left: Option<Box<BinNode>>,
///The right child of the node
right: Option<Box<BinNode>>,
}
///The Iterator for WaveletTrees
pub struct Iterhelper<'de, T> {
position: usize,
tree: &'de WaveletTree<T>,
}
impl<'de, T> WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
/// creates a WaveletTree out of a given sequence
/// * `sequence` - the sequence that is representet in the tree
pub fn create<S: Clone + Iterator<Item = T>>(sequence: S) -> WaveletTree<T> {
let mut sequence = sequence.peekable();
if sequence.peek().is_none() {
panic!("Die übergebene Sequence ist leer!")
};
let seqvec = sequence.clone().collect::<Vec<_>>();
let mut alphabet: Vec<T> = Vec::new();
alphabet.extend(sequence.unique());
alphabet.sort();
let alphslice = &alphabet[..];
WaveletTree {
root: Some(Box::new(BinNode::create_node(alphslice, seqvec))),
alphabet: alphabet,
}
}
///Returns the element at index, or an error if something goes wrong.
///To make the use of this funktion more intuitiv index starts at 1, so if you want the xth element you can call access(x)
pub fn access(&self, index: usize) -> Result<T, Error> {
ensure!(index > 0, Access0);
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
ensure!(z.len() >= index as u64, IndexOutOfBound);
let z = match &self.root {
Some(x) => x.access((index - 1) as u64, 0, self.alphabet.len() - 1),
None => return Err(Error::RootUnwrapError),
};
match z {
Some(x) => Ok(self.alphabet[x]),
None => return Err(Error::NoSuchElement),
}
}
fn access_ref(&self, index: usize) -> &T {
let result = match self.access(index) {
Ok(x) => x,
Err(_) => panic!("Index out of Bounds"),
};
for i in 0..self.alphabet.len() {
if self.alphabet[i] == result {
return &self.alphabet[i];
}
}
panic!("Index in Bounds but not found");
}
///Returns the the position of the index'th occurence of the character
pub fn select(&self, character: T, index: usize) -> Result<u64, Error> {
// Abfangen von fehlerhafter Eingabe, Index darf hier nicht 0 sein
ensure!(index > 0, SelectSmaller0);
//------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index steht das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Err(Error::NotInAlphabet),
};
//Abfangen dass der Buchstabe nicht index oft vorkommt
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
if &self.rank(character, z.len() as usize).unwrap() < &(index as u64) {
return Err(Error::NotEnoughElements);
}
let result = match &self.root {
Some(x) => x.select(index as u64, character_index, 0, self.alphabet.len() - 1),
None => return Err(Error::TempError), //Err("Fehler"),
};
match result {
Some(x) => return Ok(x + 1),
None => return Err(Error::TempError),
}
}
/// Returns the number of occurences of the character in the Intervall [1..index].
pub fn rank(&self, character: T, index: usize) -> Result<u64, Error> {
if index < 1 {
return Ok(0);
}
let index = index - 1;
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
ensure!(z.len() > index as u64, IndexOutOfBound);
//---------------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Ok(0), //element nicht in alphabet => gib 0 zurück
};
let result = match &self.root {
Some(x) => (*x).rank(index as u64, character_index, 0, &self.alphabet.len() - 1),
None => return Err(Error::NoSuchElement),
};
match result {
Some(x) => return Ok(x),
None => return Err(Error::NoSuchElement),
}
}
/// Returns a Vector that holds the sequence, this does not consume the tree
pub fn rebuild(&'de self) -> Vec<T> {
let mut result: Vec<T> = Vec::new();
for x in self.into_iter() {
result.push(x);
}
result
}
///Returns the length of the sequence or an error if the root is missing
pub fn len(&self) -> Result<u64, Error> {
let root = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
Ok(root.len())
}
///Returns the lenght of the alphabet
pub fn alphabet_len(&self) -> usize {
self.alphabet.len()
}
}
///Implements the Index Trait to allow access with [index], since it uses the access function index starts at 1
impl<'de, T> Index<usize> for WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
&self.access_ref(index)
}
}
impl BinNode {
fn create_node<E: Hash + Clone + Ord + Debug>(alphabet: &[E], sequence: Vec<E>) -> BinNode {
let count = sequence.len();
if alphabet.len() <= 1 {
let value = BitVec::new_fill(true, count as u64);
BinNode {
value: RankSelect::new(value, 1),
left: None,
right: None,
}
} else {
let mut value = BitVec::new_fill(false, count as u64);
let mid = (alphabet.len() + 1) / 2;
//Das Alphabet wird geteilt, die 2. Hälfte wird in alphabet2 gespeichert
let (alphabet1, alphabet2) = alphabet.split_at(mid);
//Die Sequenzen für den nächsten Schritt
let mut sequence1 = Vec::new();
let mut sequence2 = Vec::new();
//Es werden alle Elemente der Sequenz durchegangen
for x in 0..(sequence.len()) {
//wenn sie in der 2. Hälfte des Alphabets sind wird ihr Eintrag in der Bitmap auf 1 gesetzt
if alphabet2.contains(&sequence[x]) {
value.set_bit(x as u64, true)
}
}
//Group_by teilt in Gruppen key ist true wenn Zeichen in alphabet1, sonst false
for (key, group) in &sequence
.into_iter()
.group_by(|elem| alphabet1.contains(&elem))
{
//neue Sequencen werden anhand der Keys gebaut
if key {
sequence1.extend(group)
} else {
sequence2.extend(group)
}
}
BinNode {
value: RankSelect::new(value, 1),
left: Some(Box::new(BinNode::create_node(alphabet1, sequence1))),
right: Some(Box::new(BinNode::create_node(alphabet2, sequence2))),
}
}
}
fn access(&self, index: u64, min: usize, max: usize) -> Option<usize> {
if min == max {
return Some(min);
} else {
if self.value.get((index) as u64) {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => return (*x).access(next_index - 1, 1 + (min + max) / 2, max),
None => return None,
}
} else {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).access(next_index - 1, min, (min + max) / 2),
None => return None,
}
}
}
}
fn select(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<(u64)> {
//Blatt erreicht
if min == max {
return Some(index - 1);
}
// Position wird in Index umgerechnet, da Eingabe mit Position erfolgt
else {
if character <= &((max + min) / 2) {
let result = match &self.left {
Some(x) => (*x).select(index, character, min, (min + max) / 2),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_0(new_index + 1); //+1 da Index in Position umgerechnet wird
} else {
let result = match &self.right {
Some(x) => (*x).select(index, character, (min + max) / 2 + 1, max),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_1(new_index + 1); //+1 da Index in Position umgerechnet wird
}
}
}
fn rank(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<u64> {
if min == max {
return Some(index + 1);
}
//Wenn nicht im blatt
else {
if character <= &((max + min) / 2) {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).rank(next_index - 1, character, min, (min + max) / 2),
None => return None,
}
} else {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => {
return (*x).rank(next_index - 1, character, ((min + max) / 2) + 1, max);
}
None => return None,
}
}
}
}
fn len(&self) -> u64 {
self.value.bits().len()
}
}
///Implements a non-consuming Iterator for the WaveletTree
impl<'de, T> IntoIterator for &'de WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{ | Iterhelper {
position: 0,
tree: self,
}
}
}
impl<'de, T> Iterator for Iterhelper<'de, T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.position += 1;
let len = match self.tree.len() {
Ok(x) => x,
Err(_) => return None,
};
if self.position <= len as usize {
match self.tree.access(self.position) {
Ok(x) => return Some(x),
Err(_) => return None,
};
} else {
None
}
}
} | type Item = T;
type IntoIter = Iterhelper<'de, T>;
fn into_iter(self) -> Self::IntoIter { | random_line_split |
wavelet_tree_pointer.rs | use bio::data_structures::rank_select::RankSelect;
use bv::BitVec;
use bv::BitsMut;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, Snafu};
use std::fmt::Debug;
use std::hash::Hash;
use std::ops::Index;
///custom errors for the tree with pointer
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display(
"Es gibt kein 0tes Element, das erste Element wird mit access(1) angesprochen"
))]
Access0,
#[snafu(display("Eingabe darf bei select nicht kleiner als 1 sein"))]
SelectSmaller0,
#[snafu(display("Fehler bei root unwrap in access"))]
RootUnwrapError,
#[snafu(display("Index ist größer als die Länge der Sequence"))]
IndexOutOfBound,
#[snafu(display("Element nicht gefunden"))]
NoSuchElement,
#[snafu(display("Element nicht im Alphabet, Fehler bei select"))]
NotInAlphabet,
#[snafu(display("Das Symbol kommt nicht oft genug im Wort vor"))]
NotEnoughElements,
#[snafu(display("PlatzhalterError"))]
TempError,
}
///representation of the WaveletTree
#[derive(Serialize, Deserialize)]
pub struct WaveletTree<T> {
//The alphabet of the sequence the tree is build from
alphabet: Vec<T>,
//the first node that holds a bitmap over the entire sequence
root: Option<Box<BinNode>>,
}
///representation of the nodes in the tree,
///they are managed by the tree and the user has no direct access
#[derive(Serialize, Deserialize)]
struct BinNode {
///The bitmap stored in the node
value: RankSelect,
///The left Child of the node
left: Option<Box<BinNode>>,
///The right child of the node
right: Option<Box<BinNode>>,
}
///The Iterator for WaveletTrees
pub struct Iterhelper<'de, T> {
position: usize,
tree: &'de WaveletTree<T>,
}
impl<'de, T> WaveletTree<T>
where
T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
/// creates a WaveletTree out of a given sequence
/// * `sequence` - the sequence that is representet in the tree
pub fn create<S: Clone + Iterator<Item = T>>(sequence: S) -> WaveletTree<T> {
let mut sequence = sequence.peekable();
if sequence.peek().is_none() {
panic!("Die übergebene Sequence ist leer!")
};
let seqvec = sequence.clone().collect::<Vec<_>>();
let mut alphabet: Vec<T> = Vec::new();
alphabet.extend(sequence.unique());
alphabet.sort();
let alphslice = &alphabet[..];
WaveletTree {
root: Some(Box::new(BinNode::create_node(alphslice, seqvec))),
alphabet: alphabet,
}
}
///Returns the element at index, or an error if something goes wrong.
///To make the use of this funktion more intuitiv index starts at 1, so if you want the xth element you can call access(x)
pub fn access(&self, index: usize) -> Result<T, Error> {
ensure!(index > 0, Access0);
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
ensure!(z.len() >= index as u64, IndexOutOfBound);
let z = match &self.root {
Some(x) => x.access((index - 1) as u64, 0, self.alphabet.len() - 1),
None => return Err(Error::RootUnwrapError),
};
match z {
Some(x) => Ok(self.alphabet[x]),
None => return Err(Error::NoSuchElement),
}
}
fn access_ref(&self, index: usize) -> &T {
let result = match self.access(index) {
Ok(x) => x,
Err(_) => panic!("Index out of Bounds"),
};
for i in 0..self.alphabet.len() {
if self.alphabet[i] == result {
return &self.alphabet[i];
}
}
panic!("Index in Bounds but not found");
}
///Returns the the position of the index'th occurence of the character
pub fn select(&self, character: T, index: usize) -> Result<u64, Error> {
// Abfangen von fehlerhafter Eingabe, Index darf hier nicht 0 sein
ensure!(index > 0, SelectSmaller0);
//------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index steht das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Err(Error::NotInAlphabet),
};
//Abfangen dass der Buchstabe nicht index oft vorkommt
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
if &self.rank(character, z.len() as usize).unwrap() < &(index as u64) {
return Err(Error::NotEnoughElements);
}
let result = match &self.root {
Some(x) => x.select(index as u64, character_index, 0, self.alphabet.len() - 1),
None => return Err(Error::TempError), //Err("Fehler"),
};
match result {
Some(x) => return Ok(x + 1),
None => return Err(Error::TempError),
}
}
/// Returns the number of occurences of the character in the Intervall [1..index].
pub fn rank(&self, character: T, index: usize) -> Result<u64, Error> {
if index < 1 {
return Ok(0);
}
let index = index - 1;
let z = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
// Abfangen von fehlerhafter Eingabe, Index ist größer als Sequenz
ensure!(z.len() > index as u64, IndexOutOfBound);
//---------------------------------
let character_index1 = &self.alphabet.binary_search(&character); // speichere an welchem index das gesuchte zeichen im alphabet steht
let character_index = match character_index1 {
Ok(x) => x,
Err(_) => return Ok(0), //element nicht in alphabet => gib 0 zurück
};
let result = match &self.root {
Some(x) => (*x).rank(index as u64, character_index, 0, &self.alphabet.len() - 1),
None => return Err(Error::NoSuchElement),
};
match result {
Some(x) => return Ok(x),
None => return Err(Error::NoSuchElement),
}
}
/// Returns a Vector that holds the sequence, this does not consume the tree
pub fn rebuild(&'de self) -> Vec<T> {
let mut result: Vec<T> = Vec::new();
for x in self.into_iter() {
result.push(x);
}
result
}
///Returns the length of the sequence or an error if the root is missing
pub fn len(&self) -> Result<u64, Error> {
let root = match &self.root {
Some(x) => x,
None => return Err(Error::RootUnwrapError),
};
Ok(root.len())
}
///Returns the lenght of the alphabet
pub fn alphabet_len(&self) -> usize {
self.alphabet.len()
}
}
///Implements the Index Trait to allow access with [index]; since it uses the access function, index starts at 1
impl<'de, T> Index<usize> for WaveletTree<T>
where
    T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
    type Output = T;
    /// Panics (via `access_ref`) if `index` is 0 or out of bounds.
    fn index(&self, index: usize) -> &Self::Output {
        // `access_ref` already returns `&T`; the original's extra `&`
        // produced a `&&T` that only compiled through deref coercion.
        self.access_ref(index)
    }
}
impl BinNode {
fn create_node<E: Hash + Clone + Ord + Debug>(alphabet: &[E], sequence: Vec<E>) -> BinNode {
let count = sequence.len();
if alphabet.len() <= 1 {
let value = BitVec::new_fill(true, count as u64);
BinNode {
value: RankSelect::new(value, 1),
left: None,
right: None,
}
} else {
let mut value = BitVec::new_fill(false, count as u64);
let mid = (alphabet.len() + 1) / 2;
//Das Alphabet wird geteilt, die 2. Hälfte wird in alphabet2 gespeichert
let (alphabet1, alphabet2) = alphabet.split_at(mid);
//Die Sequenzen für den nächsten Schritt
let mut sequence1 = Vec::new();
let mut sequence2 = Vec::new();
//Es werden alle Elemente der Sequenz durchegangen
for x in 0..(sequence.len()) {
//wenn sie in der 2. Hälfte des Alphabets sind wird ihr Eintrag in der Bitmap auf 1 gesetzt
if alphabet2.contains(&sequence[x]) {
value.set_bit(x as u64, true)
}
}
//Group_by teilt in Gruppen key ist true wenn Zeichen in alphabet1, sonst false
for (key, group) in &sequence
.into_iter()
.group_by(|elem| alphabet1.contains(&elem))
{
//neue Sequencen werden anhand der Keys gebaut
if key {
sequence1.extend(group)
} else {
sequence2.extend(group)
}
}
BinNode {
value: RankSelect::new(value, 1),
left: Some(Box::new(BinNode::create_node(alphabet1, sequence1))),
right: Some(Box::new(BinNode::create_node(alphabet2, sequence2))),
}
}
}
fn access(&self, index: u64, min: usize, max: usize) -> Option<usize> {
if min == max {
return Some(min);
} else {
if self.value.get((index) as u64) {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => return (*x).access(next_index - 1, 1 + (min + max) / 2, max),
None => return None,
}
} else {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).access(next_index - 1, min, (min + max) / 2),
None => return None,
}
}
}
}
fn select(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<(u64)> {
//Blatt erreicht
if min == max {
return Some(index - 1);
}
// Position wird in Index umgerechnet, da Eingabe mit Position erfolgt
else {
if character <= &((max + min) / 2) {
let result = match &self.left {
Some(x) => (*x).select(index, character, min, (min + max) / 2),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_0(new_index + 1); //+1 da Index in Position umgerechnet wird
} else {
let result = match &self.right {
Some(x) => (*x).select(index, character, (min + max) / 2 + 1, max),
None => return None,
};
let new_index = match result {
Some(x) => x,
None => return None,
};
return self.value.select_1(new_index + 1); //+1 da Index in Position umgerechnet wird
}
}
}
fn rank(&self, index: u64, character: &usize, min: usize, max: usize) -> Option<u64> {
if min == max {
return Some(index + 1);
}
//Wenn nicht im blatt
else {
if character <= &((max + min) / 2) {
let next_index = self.value.rank_0((index) as u64).unwrap();
match &self.left {
Some(x) => return (*x).rank(next_index - 1, character, min, (min + max) / 2),
None => return None,
}
} else {
let next_index = self.value.rank((index) as u64).unwrap();
match &self.right {
Some(x) => {
return (*x).rank(next_index - 1, character, ((min + max) / 2) + 1, max);
}
None => return None,
}
}
}
}
fn len(&self) -> u64 {
self.value.bits().len()
}
}
///Implements a non-consuming Iterator for the WaveletTree
impl<'de, T> IntoIterator for &'de WaveletTree<T>
where
    T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
    type Item = T;
    type IntoIter = Iterhelper<'de, T>;
    /// Builds an iterator positioned in front of the first element.
    fn into_iter(self) -> Self::IntoIter {
        Iterhelper {
            tree: self,
            position: 0,
        }
    }
}
impl<'de, T> Iterator for Iterhelper<'de, T>
where
    T: Hash + Clone + Ord + Debug + Copy + Serialize + Deserialize<'de>,
{
    type Item = T;
    /// Advances to the next position and yields the symbol there; returns
    /// `None` at the end of the sequence or on any internal error.
    /// (Also removes a stray `|` token that broke compilation in the
    /// original `else` branch.)
    fn next(&mut self) -> Option<Self::Item> {
        self.position += 1;
        // A tree without a root yields nothing.
        let len = self.tree.len().ok()?;
        if self.position <= len as usize {
            // access() is 1-based, matching `position`; errors end iteration.
            self.tree.access(self.position).ok()
        } else {
            None
        }
    }
}
| conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.