| file_name (string, 4–140 chars) | prefix (string, 0–39k chars) | suffix (string, 0–36.1k chars) | middle (string, 0–29.4k chars) | fim_type (string, 4 classes) |
|---|---|---|---|---|
rpc.rs | use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr};
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use bytes::{Bytes, BytesMut};
use futures::{SinkExt, Stream, StreamExt};
use orion::aead::streaming::StreamOpener;
use orion::aead::SecretKey;
use tokio::net::{TcpListener, TcpStream};
use tokio::time::sleep;
use tokio::time::timeout;
use crate::comm::{ConnectionRegistration, RegisterWorker};
use crate::hwstats::WorkerHwStateMessage;
use crate::internal::common::resources::map::ResourceMap;
use crate::internal::common::resources::{Allocation, AllocationValue};
use crate::internal::common::WrappedRcRefCell;
use crate::internal::messages::worker::{
FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue,
ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason,
};
use crate::internal::server::rpc::ConnectionDescriptor;
use crate::internal::transfer::auth::{
do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize,
};
use crate::internal::transfer::transport::make_protocol_builder;
use crate::internal::worker::comm::WorkerComm;
use crate::internal::worker::configuration::{
sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration,
};
use crate::internal::worker::hwmonitor::HwSampler;
use crate::internal::worker::reactor::run_task;
use crate::internal::worker::state::{WorkerState, WorkerStateRef};
use crate::internal::worker::task::Task;
use crate::launcher::TaskLauncher;
use crate::WorkerId;
use futures::future::Either;
use tokio::sync::Notify;
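/// Binds a TCP listener to an OS-assigned port on all interfaces and returns it together with the chosen port.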
async fn start_listener() -> crate::Result<(TcpListener, u16)> {
let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0);
let listener = TcpListener::bind(address).await?;
let port = {
let socketaddr = listener.local_addr()?;
socketaddr.port()
};
log::info!("Listening on port {}", port);
Ok((listener, port))
}
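/// Attempts to connect to one of the given server addresses, retrying up to 20 times with a two-second pause between failed attempts.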
async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> {
log::info!(
"Connecting to server (candidate addresses = {:?})",
addresses
);
let max_attempts = 20;
for _ in 0..max_attempts {
match TcpStream::connect(addresses).await {
Ok(stream) => {
let address = stream.peer_addr()?;
log::debug!("Connected to server at {address:?}");
return Ok((stream, address));
}
Err(e) => {
log::error!("Could not connect to server, error: {}", e);
sleep(Duration::from_secs(2)).await;
}
}
}
Result::Err(crate::Error::GenericError(
"Server could not be connected".into(),
))
}
pub async fn connect_to_server_and_authenticate(
server_addresses: &[SocketAddr],
secret_key: &Option<Arc<SecretKey>>,
) -> crate::Result<ConnectionDescriptor> {
let (stream, address) = connect_to_server(server_addresses).await?;
let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split();
let (sealer, opener) = do_authentication(
0,
"worker".to_string(),
"server".to_string(),
secret_key.clone(),
&mut writer,
&mut reader,
)
.await?;
Ok(ConnectionDescriptor {
address,
receiver: reader,
sender: writer,
sealer,
opener,
})
}
// Maximum time to wait for running tasks to shut down when the worker ends.
const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5);
/// Connects to the server and starts a message receiving loop.
/// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified.
pub async fn run_worker(
scheduler_addresses: &[SocketAddr],
mut configuration: WorkerConfiguration,
secret_key: Option<Arc<SecretKey>>,
launcher_setup: Box<dyn TaskLauncher>,
stop_flag: Arc<Notify>,
) -> crate::Result<(
(WorkerId, WorkerConfiguration),
impl Future<Output = crate::Result<()>>,
)> {
let (_listener, port) = start_listener().await?;
configuration.listen_address = format!("{}:{}", configuration.hostname, port);
let ConnectionDescriptor {
mut sender,
mut receiver,
mut opener,
mut sealer,
..
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?;
{
let message = ConnectionRegistration::Worker(RegisterWorker {
configuration: configuration.clone(),
});
let data = serialize(&message)?.into();
sender.send(seal_message(&mut sealer, data)).await?;
}
let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>();
let heartbeat_interval = configuration.heartbeat_interval;
let overview_configuration = configuration.overview_configuration.clone();
let time_limit = configuration.time_limit;
let (worker_id, state, start_task_notify) = {
match timeout(Duration::from_secs(15), receiver.next()).await {
Ok(Some(data)) => {
let WorkerRegistrationResponse {
worker_id,
other_workers,
resource_names,
server_idle_timeout,
server_uid,
} = open_message(&mut opener, &data?)?;
sync_worker_configuration(&mut configuration, server_idle_timeout);
let start_task_notify = Rc::new(Notify::new());
let comm = WorkerComm::new(queue_sender, start_task_notify.clone());
let state_ref = WorkerStateRef::new(
comm,
worker_id,
configuration.clone(),
secret_key,
ResourceMap::from_vec(resource_names),
launcher_setup,
server_uid,
);
{
let mut state = state_ref.get_mut();
for worker_info in other_workers {
state.new_worker(worker_info);
}
}
(worker_id, state_ref, start_task_notify)
}
Ok(None) => panic!("Connection closed without receiving registration response"),
Err(_) => panic!("Did not receive worker registration response"),
}
};
let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone());
let idle_timeout_fut = match configuration.idle_timeout {
Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())),
None => Either::Right(futures::future::pending()),
};
let overview_fut = match overview_configuration {
None => Either::Left(futures::future::pending()),
Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)),
};
let time_limit_fut = match time_limit {
None => Either::Left(futures::future::pending::<()>()),
Some(d) => Either::Right(tokio::time::sleep(d)),
};
let future = async move {
let try_start_tasks = task_starter_process(state.clone(), start_task_notify);
let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer);
tokio::pin! {
let send_loop = send_loop;
let try_start_tasks = try_start_tasks;
}
let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! {
r = worker_message_loop(state.clone(), receiver, opener) => {
log::debug!("Server read connection has disconnected");
r.map(|_| None)
}
r = &mut send_loop => {
log::debug!("Server write connection has disconnected");
r.map_err(|e| e.into()).map(|_| None)
},
_ = time_limit_fut => {
log::info!("Time limit reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached)))
}
_ = idle_timeout_fut => {
log::info!("Idle timeout reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout)))
}
_ = stop_flag.notified() => {
log::info!("Worker received an external stop notification");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted)))
}
_ = &mut try_start_tasks => { unreachable!() }
_ = heartbeat_fut => { unreachable!() }
_ = overview_fut => { unreachable!() }
};
// Handle sending stop info to the server and finishing running tasks gracefully.
let result = match result {
Ok(Some(msg)) => {
// Worker wants to end gracefully, send message to the server
{
state.get_mut().comm().send_message_to_server(msg);
state.get_mut().comm().drop_sender();
}
send_loop.await?;
Ok(())
}
Ok(None) => {
// Graceful shutdown from server
Ok(())
}
Err(e) => {
// Server has disconnected
tokio::select! {
_ = &mut try_start_tasks => { unreachable!() }
r = finish_tasks_on_server_lost(state.clone()) => r
}
Err(e)
}
};
// At this point, there can still be some tasks that are running.
// We cancel them here to make sure that we do not leak their spawned processes, if possible.
// The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local,
// therefore we do not need to await any specific future to drive them forward.
// try_start_tasks is not being polled, therefore no new tasks should be started.
cancel_running_tasks_on_worker_end(state).await;
result
};
// Provide a local task set for spawning futures
let future = async move {
let set = tokio::task::LocalSet::new();
set.run_until(future).await
};
Ok(((worker_id, configuration), future))
}
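/// Applies the configured `ServerLostPolicy` after the server connection is lost: either stops immediately or waits for the currently running tasks to finish.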
async fn finish_tasks_on_server_lost(state: WorkerStateRef) {
let on_server_lost = state.get().configuration.on_server_lost.clone();
match on_server_lost {
ServerLostPolicy::Stop => {}
ServerLostPolicy::FinishRunning => {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
if !state.is_empty() {
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
Some(notify)
} else {
None
}
};
if let Some(notify) = notify {
log::info!("Waiting for finishing running tasks");
notify.notified().await;
log::info!("All running tasks were finished");
} else {
log::info!("No running tasks remain")
}
}
}
}
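/// Cancels all remaining running tasks and waits up to `MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN` for them to stop.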
async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
for task in state.running_tasks.clone() {
state.cancel_task(task);
}
if state.running_tasks.is_empty() {
return;
}
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
notify
};
log::info!("Waiting for stopping running tasks");
match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await {
Ok(_) => {
log::info!("All running tasks were stopped");
}
Err(_) => {
log::info!("Timed out while waiting for running tasks to stop");
}
}
}
/// Tries to start tasks after a new task appears or some task finishes.
async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) {
loop {
notify.notified().await;
let mut state = state_ref.get_mut();
state.start_task_scheduled = false;
let remaining_time = if let Some(limit) = state.configuration.time_limit {
let life_time = std::time::Instant::now() - state.start_time;
if life_time >= limit {
log::debug!("Trying to start a task after time limit");
break;
}
Some(limit - life_time)
} else {
None
};
loop {
let (task_map, ready_task_queue) = state.borrow_tasks_and_queue();
let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time);
if allocations.is_empty() {
break;
}
for (task_id, allocation, resource_index) in allocations {
run_task(&mut state, &state_ref, task_id, allocation, resource_index);
}
}
}
}
/// Repeatedly sends a heartbeat message to the server.
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(heartbeat_interval);
loop {
interval.tick().await;
state_ref
.get_mut()
.comm()
.send_message_to_server(FromWorkerMessage::Heartbeat);
log::debug!("Heartbeat sent");
}
}
/// Runs until an idle timeout happens.
/// Idle timeout occurs when the worker doesn't have anything to do for the specified duration.
async fn idle_timeout_process(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>) |
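/// Handles a single message received from the server. Returns `true` if the worker should stop.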
pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool {
match message {
ToWorkerMessage::ComputeTask(msg) => {
log::debug!("Task assigned: {}", msg.id);
let task = Task::new(msg);
state.add_task(task);
}
ToWorkerMessage::StealTasks(msg) => {
log::debug!("Steal {} attempts", msg.ids.len());
let responses: Vec<_> = msg
.ids
.iter()
.map(|task_id| {
let response = state.steal_task(*task_id);
log::debug!("Steal attempt: {}, response {:?}", task_id, response);
(*task_id, response)
})
.collect();
let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses });
state.comm().send_message_to_server(message);
}
ToWorkerMessage::CancelTasks(msg) => {
for task_id in msg.ids {
state.cancel_task(task_id);
}
}
ToWorkerMessage::NewWorker(msg) => {
state.new_worker(msg);
}
ToWorkerMessage::LostWorker(worker_id) => {
state.remove_worker(worker_id);
}
ToWorkerMessage::SetReservation(on_off) => {
state.reservation = on_off;
if !on_off {
state.reset_idle_timer();
}
}
ToWorkerMessage::Stop => {
log::info!("Received stop command");
return true;
}
}
false
}
/// Runs while messages are coming from the server; returns when a stop command arrives.
async fn worker_message_loop(
state_ref: WorkerStateRef,
mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin,
mut opener: Option<StreamOpener>,
) -> crate::Result<()> {
while let Some(data) = stream.next().await {
let data = data?;
let message: ToWorkerMessage = open_message(&mut opener, &data)?;
let mut state = state_ref.get_mut();
if process_worker_message(&mut state, message) {
return Ok(());
}
}
log::debug!("Connection to server is closed");
Err("Server connection closed".into())
}
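/// Periodically samples the hardware state on a background thread and sends a `WorkerOverview` with the currently running tasks and the HW state to the server.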
async fn send_overview_loop(
state_ref: WorkerStateRef,
configuration: OverviewConfiguration,
) -> crate::Result<()> {
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
let OverviewConfiguration {
send_interval,
gpu_families,
} = configuration;
// Fetching the HW state performs blocking I/O, therefore we do it on a dedicated thread.
// tokio::task::spawn_blocking is not used because it would need mutable access to the sampler,
// which we do not want to recreate for every sample.
std::thread::spawn(move || -> crate::Result<()> {
let mut sampler = HwSampler::init(gpu_families)?;
loop {
std::thread::sleep(send_interval);
let hw_state = sampler.fetch_hw_state()?;
if let Err(error) = tx.blocking_send(hw_state) {
log::error!("Cannot send HW state to overview loop: {error:?}");
break;
}
}
Ok(())
});
let mut poll_interval = tokio::time::interval(send_interval);
loop {
poll_interval.tick().await;
if let Some(hw_state) = rx.recv().await {
let mut worker_state = state_ref.get_mut();
let message = FromWorkerMessage::Overview(WorkerOverview {
id: worker_state.worker_id,
running_tasks: worker_state
.running_tasks
.iter()
.map(|&task_id| {
let task = worker_state.get_task(task_id);
let allocation: &Allocation = task.resource_allocation().unwrap();
(
task_id,
resource_allocation_to_msg(allocation, worker_state.get_resource_map()),
)
// TODO: Modify this when more CPUs are allowed
})
.collect(),
hw_state: Some(WorkerHwStateMessage { state: hw_state }),
});
worker_state.comm().send_message_to_server(message);
}
}
}
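/// Converts an internal `Allocation` into its message representation, mapping resource ids back to resource names.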
fn resource_allocation_to_msg(
allocation: &Allocation,
resource_map: &ResourceMap,
) -> TaskResourceAllocation {
TaskResourceAllocation {
resources: allocation
.resources
.iter()
.map(
|alloc| crate::internal::messages::worker::ResourceAllocation {
resource: resource_map
.get_name(alloc.resource)
.unwrap_or("unknown")
.to_string(),
value: match &alloc.value {
AllocationValue::Indices(indices) => {
TaskResourceAllocationValue::Indices(indices.iter().cloned().collect())
}
AllocationValue::Sum(amount) => TaskResourceAllocationValue::Sum(*amount),
},
},
)
.collect(),
}
}
| {
let mut interval = tokio::time::interval(Duration::from_secs(1));
loop {
interval.tick().await;
let state = state_ref.get();
if !state.has_tasks() && !state.reservation {
let elapsed = state.last_task_finish_time.elapsed();
if elapsed > idle_timeout {
break;
}
}
}
} | identifier_body |
rpc.rs | use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr};
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use bytes::{Bytes, BytesMut};
use futures::{SinkExt, Stream, StreamExt};
use orion::aead::streaming::StreamOpener;
use orion::aead::SecretKey;
use tokio::net::{TcpListener, TcpStream};
use tokio::time::sleep;
use tokio::time::timeout;
use crate::comm::{ConnectionRegistration, RegisterWorker};
use crate::hwstats::WorkerHwStateMessage; | use crate::internal::common::WrappedRcRefCell;
use crate::internal::messages::worker::{
FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue,
ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason,
};
use crate::internal::server::rpc::ConnectionDescriptor;
use crate::internal::transfer::auth::{
do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize,
};
use crate::internal::transfer::transport::make_protocol_builder;
use crate::internal::worker::comm::WorkerComm;
use crate::internal::worker::configuration::{
sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration,
};
use crate::internal::worker::hwmonitor::HwSampler;
use crate::internal::worker::reactor::run_task;
use crate::internal::worker::state::{WorkerState, WorkerStateRef};
use crate::internal::worker::task::Task;
use crate::launcher::TaskLauncher;
use crate::WorkerId;
use futures::future::Either;
use tokio::sync::Notify;
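/// Binds a TCP listener to an OS-assigned port on all interfaces and returns it together with the chosen port.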
async fn start_listener() -> crate::Result<(TcpListener, u16)> {
let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0);
let listener = TcpListener::bind(address).await?;
let port = {
let socketaddr = listener.local_addr()?;
socketaddr.port()
};
log::info!("Listening on port {}", port);
Ok((listener, port))
}
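/// Attempts to connect to one of the given server addresses, retrying up to 20 times with a two-second pause between failed attempts.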
async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> {
log::info!(
"Connecting to server (candidate addresses = {:?})",
addresses
);
let max_attempts = 20;
for _ in 0..max_attempts {
match TcpStream::connect(addresses).await {
Ok(stream) => {
let address = stream.peer_addr()?;
log::debug!("Connected to server at {address:?}");
return Ok((stream, address));
}
Err(e) => {
log::error!("Could not connect to server, error: {}", e);
sleep(Duration::from_secs(2)).await;
}
}
}
Result::Err(crate::Error::GenericError(
"Server could not be connected".into(),
))
}
pub async fn connect_to_server_and_authenticate(
server_addresses: &[SocketAddr],
secret_key: &Option<Arc<SecretKey>>,
) -> crate::Result<ConnectionDescriptor> {
let (stream, address) = connect_to_server(server_addresses).await?;
let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split();
let (sealer, opener) = do_authentication(
0,
"worker".to_string(),
"server".to_string(),
secret_key.clone(),
&mut writer,
&mut reader,
)
.await?;
Ok(ConnectionDescriptor {
address,
receiver: reader,
sender: writer,
sealer,
opener,
})
}
// Maximum time to wait for running tasks to shut down when the worker ends.
const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5);
/// Connects to the server and starts a message receiving loop.
/// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified.
pub async fn run_worker(
scheduler_addresses: &[SocketAddr],
mut configuration: WorkerConfiguration,
secret_key: Option<Arc<SecretKey>>,
launcher_setup: Box<dyn TaskLauncher>,
stop_flag: Arc<Notify>,
) -> crate::Result<(
(WorkerId, WorkerConfiguration),
impl Future<Output = crate::Result<()>>,
)> {
let (_listener, port) = start_listener().await?;
configuration.listen_address = format!("{}:{}", configuration.hostname, port);
let ConnectionDescriptor {
mut sender,
mut receiver,
mut opener,
mut sealer,
..
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?;
{
let message = ConnectionRegistration::Worker(RegisterWorker {
configuration: configuration.clone(),
});
let data = serialize(&message)?.into();
sender.send(seal_message(&mut sealer, data)).await?;
}
let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>();
let heartbeat_interval = configuration.heartbeat_interval;
let overview_configuration = configuration.overview_configuration.clone();
let time_limit = configuration.time_limit;
let (worker_id, state, start_task_notify) = {
match timeout(Duration::from_secs(15), receiver.next()).await {
Ok(Some(data)) => {
let WorkerRegistrationResponse {
worker_id,
other_workers,
resource_names,
server_idle_timeout,
server_uid,
} = open_message(&mut opener, &data?)?;
sync_worker_configuration(&mut configuration, server_idle_timeout);
let start_task_notify = Rc::new(Notify::new());
let comm = WorkerComm::new(queue_sender, start_task_notify.clone());
let state_ref = WorkerStateRef::new(
comm,
worker_id,
configuration.clone(),
secret_key,
ResourceMap::from_vec(resource_names),
launcher_setup,
server_uid,
);
{
let mut state = state_ref.get_mut();
for worker_info in other_workers {
state.new_worker(worker_info);
}
}
(worker_id, state_ref, start_task_notify)
}
Ok(None) => panic!("Connection closed without receiving registration response"),
Err(_) => panic!("Did not receive worker registration response"),
}
};
let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone());
let idle_timeout_fut = match configuration.idle_timeout {
Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())),
None => Either::Right(futures::future::pending()),
};
let overview_fut = match overview_configuration {
None => Either::Left(futures::future::pending()),
Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)),
};
let time_limit_fut = match time_limit {
None => Either::Left(futures::future::pending::<()>()),
Some(d) => Either::Right(tokio::time::sleep(d)),
};
let future = async move {
let try_start_tasks = task_starter_process(state.clone(), start_task_notify);
let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer);
tokio::pin! {
let send_loop = send_loop;
let try_start_tasks = try_start_tasks;
}
let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! {
r = worker_message_loop(state.clone(), receiver, opener) => {
log::debug!("Server read connection has disconnected");
r.map(|_| None)
}
r = &mut send_loop => {
log::debug!("Server write connection has disconnected");
r.map_err(|e| e.into()).map(|_| None)
},
_ = time_limit_fut => {
log::info!("Time limit reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached)))
}
_ = idle_timeout_fut => {
log::info!("Idle timeout reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout)))
}
_ = stop_flag.notified() => {
log::info!("Worker received an external stop notification");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted)))
}
_ = &mut try_start_tasks => { unreachable!() }
_ = heartbeat_fut => { unreachable!() }
_ = overview_fut => { unreachable!() }
};
// Handle sending stop info to the server and finishing running tasks gracefully.
let result = match result {
Ok(Some(msg)) => {
// Worker wants to end gracefully, send message to the server
{
state.get_mut().comm().send_message_to_server(msg);
state.get_mut().comm().drop_sender();
}
send_loop.await?;
Ok(())
}
Ok(None) => {
// Graceful shutdown from server
Ok(())
}
Err(e) => {
// Server has disconnected
tokio::select! {
_ = &mut try_start_tasks => { unreachable!() }
r = finish_tasks_on_server_lost(state.clone()) => r
}
Err(e)
}
};
// At this point, there can still be some tasks that are running.
// We cancel them here to make sure that we do not leak their spawned processes, if possible.
// The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local,
// therefore we do not need to await any specific future to drive them forward.
// try_start_tasks is not being polled, therefore no new tasks should be started.
cancel_running_tasks_on_worker_end(state).await;
result
};
// Provide a local task set for spawning futures
let future = async move {
let set = tokio::task::LocalSet::new();
set.run_until(future).await
};
Ok(((worker_id, configuration), future))
}
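/// Applies the configured `ServerLostPolicy` after the server connection is lost: either stops immediately or waits for the currently running tasks to finish.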
async fn finish_tasks_on_server_lost(state: WorkerStateRef) {
let on_server_lost = state.get().configuration.on_server_lost.clone();
match on_server_lost {
ServerLostPolicy::Stop => {}
ServerLostPolicy::FinishRunning => {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
if !state.is_empty() {
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
Some(notify)
} else {
None
}
};
if let Some(notify) = notify {
log::info!("Waiting for finishing running tasks");
notify.notified().await;
log::info!("All running tasks were finished");
} else {
log::info!("No running tasks remain")
}
}
}
}
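/// Cancels all remaining running tasks and waits up to `MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN` for them to stop.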
async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
for task in state.running_tasks.clone() {
state.cancel_task(task);
}
if state.running_tasks.is_empty() {
return;
}
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
notify
};
log::info!("Waiting for stopping running tasks");
match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await {
Ok(_) => {
log::info!("All running tasks were stopped");
}
Err(_) => {
log::info!("Timed out while waiting for running tasks to stop");
}
}
}
/// Tries to start tasks after a new task appears or some task finishes.
async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) {
loop {
notify.notified().await;
let mut state = state_ref.get_mut();
state.start_task_scheduled = false;
let remaining_time = if let Some(limit) = state.configuration.time_limit {
let life_time = std::time::Instant::now() - state.start_time;
if life_time >= limit {
log::debug!("Trying to start a task after time limit");
break;
}
Some(limit - life_time)
} else {
None
};
loop {
let (task_map, ready_task_queue) = state.borrow_tasks_and_queue();
let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time);
if allocations.is_empty() {
break;
}
for (task_id, allocation, resource_index) in allocations {
run_task(&mut state, &state_ref, task_id, allocation, resource_index);
}
}
}
}
/// Repeatedly sends a heartbeat message to the server.
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(heartbeat_interval);
loop {
interval.tick().await;
state_ref
.get_mut()
.comm()
.send_message_to_server(FromWorkerMessage::Heartbeat);
log::debug!("Heartbeat sent");
}
}
/// Runs until an idle timeout happens.
/// Idle timeout occurs when the worker doesn't have anything to do for the specified duration.
async fn idle_timeout_process(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(Duration::from_secs(1));
loop {
interval.tick().await;
let state = state_ref.get();
if !state.has_tasks() && !state.reservation {
let elapsed = state.last_task_finish_time.elapsed();
if elapsed > idle_timeout {
break;
}
}
}
}
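/// Handles a single message received from the server. Returns `true` if the worker should stop.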
pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool {
match message {
ToWorkerMessage::ComputeTask(msg) => {
log::debug!("Task assigned: {}", msg.id);
let task = Task::new(msg);
state.add_task(task);
}
ToWorkerMessage::StealTasks(msg) => {
log::debug!("Steal {} attempts", msg.ids.len());
let responses: Vec<_> = msg
.ids
.iter()
.map(|task_id| {
let response = state.steal_task(*task_id);
log::debug!("Steal attempt: {}, response {:?}", task_id, response);
(*task_id, response)
})
.collect();
let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses });
state.comm().send_message_to_server(message);
}
ToWorkerMessage::CancelTasks(msg) => {
for task_id in msg.ids {
state.cancel_task(task_id);
}
}
ToWorkerMessage::NewWorker(msg) => {
state.new_worker(msg);
}
ToWorkerMessage::LostWorker(worker_id) => {
state.remove_worker(worker_id);
}
ToWorkerMessage::SetReservation(on_off) => {
state.reservation = on_off;
if !on_off {
state.reset_idle_timer();
}
}
ToWorkerMessage::Stop => {
log::info!("Received stop command");
return true;
}
}
false
}
/// Runs while messages are coming from the server; returns when a stop command arrives.
async fn worker_message_loop(
state_ref: WorkerStateRef,
mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin,
mut opener: Option<StreamOpener>,
) -> crate::Result<()> {
while let Some(data) = stream.next().await {
let data = data?;
let message: ToWorkerMessage = open_message(&mut opener, &data)?;
let mut state = state_ref.get_mut();
if process_worker_message(&mut state, message) {
return Ok(());
}
}
log::debug!("Connection to server is closed");
Err("Server connection closed".into())
}
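/// Periodically samples the hardware state on a background thread and sends a `WorkerOverview` with the currently running tasks and the HW state to the server.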
async fn send_overview_loop(
state_ref: WorkerStateRef,
configuration: OverviewConfiguration,
) -> crate::Result<()> {
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
let OverviewConfiguration {
send_interval,
gpu_families,
} = configuration;
// Fetching the HW state performs blocking I/O, therefore we do it on a dedicated thread.
// tokio::task::spawn_blocking is not used because it would need mutable access to the sampler,
// which we do not want to recreate for every sample.
std::thread::spawn(move || -> crate::Result<()> {
let mut sampler = HwSampler::init(gpu_families)?;
loop {
std::thread::sleep(send_interval);
let hw_state = sampler.fetch_hw_state()?;
if let Err(error) = tx.blocking_send(hw_state) {
log::error!("Cannot send HW state to overview loop: {error:?}");
break;
}
}
Ok(())
});
let mut poll_interval = tokio::time::interval(send_interval);
loop {
poll_interval.tick().await;
if let Some(hw_state) = rx.recv().await {
let mut worker_state = state_ref.get_mut();
let message = FromWorkerMessage::Overview(WorkerOverview {
id: worker_state.worker_id,
running_tasks: worker_state
.running_tasks
.iter()
.map(|&task_id| {
let task = worker_state.get_task(task_id);
let allocation: &Allocation = task.resource_allocation().unwrap();
(
task_id,
resource_allocation_to_msg(allocation, worker_state.get_resource_map()),
)
// TODO: Modify this when more CPUs are allowed
})
.collect(),
hw_state: Some(WorkerHwStateMessage { state: hw_state }),
});
worker_state.comm().send_message_to_server(message);
}
}
}
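/// Converts an internal `Allocation` into its message representation, mapping resource ids back to resource names.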
fn resource_allocation_to_msg(
allocation: &Allocation,
resource_map: &ResourceMap,
) -> TaskResourceAllocation {
TaskResourceAllocation {
resources: allocation
.resources
.iter()
.map(
|alloc| crate::internal::messages::worker::ResourceAllocation {
resource: resource_map
.get_name(alloc.resource)
.unwrap_or("unknown")
.to_string(),
value: match &alloc.value {
AllocationValue::Indices(indices) => {
TaskResourceAllocationValue::Indices(indices.iter().cloned().collect())
}
AllocationValue::Sum(amount) => TaskResourceAllocationValue::Sum(*amount),
},
},
)
.collect(),
}
} | use crate::internal::common::resources::map::ResourceMap;
use crate::internal::common::resources::{Allocation, AllocationValue}; | random_line_split |
rpc.rs | use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr};
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use bytes::{Bytes, BytesMut};
use futures::{SinkExt, Stream, StreamExt};
use orion::aead::streaming::StreamOpener;
use orion::aead::SecretKey;
use tokio::net::{TcpListener, TcpStream};
use tokio::time::sleep;
use tokio::time::timeout;
use crate::comm::{ConnectionRegistration, RegisterWorker};
use crate::hwstats::WorkerHwStateMessage;
use crate::internal::common::resources::map::ResourceMap;
use crate::internal::common::resources::{Allocation, AllocationValue};
use crate::internal::common::WrappedRcRefCell;
use crate::internal::messages::worker::{
FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue,
ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason,
};
use crate::internal::server::rpc::ConnectionDescriptor;
use crate::internal::transfer::auth::{
do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize,
};
use crate::internal::transfer::transport::make_protocol_builder;
use crate::internal::worker::comm::WorkerComm;
use crate::internal::worker::configuration::{
sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration,
};
use crate::internal::worker::hwmonitor::HwSampler;
use crate::internal::worker::reactor::run_task;
use crate::internal::worker::state::{WorkerState, WorkerStateRef};
use crate::internal::worker::task::Task;
use crate::launcher::TaskLauncher;
use crate::WorkerId;
use futures::future::Either;
use tokio::sync::Notify;
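/// Binds a TCP listener to an OS-assigned port on all interfaces and returns it together with the chosen port.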
async fn start_listener() -> crate::Result<(TcpListener, u16)> {
let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0);
let listener = TcpListener::bind(address).await?;
let port = {
let socketaddr = listener.local_addr()?;
socketaddr.port()
};
log::info!("Listening on port {}", port);
Ok((listener, port))
}
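/// Attempts to connect to one of the given server addresses, retrying up to 20 times with a two-second pause between failed attempts.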
async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> {
log::info!(
"Connecting to server (candidate addresses = {:?})",
addresses
);
let max_attempts = 20;
for _ in 0..max_attempts {
match TcpStream::connect(addresses).await {
Ok(stream) => {
let address = stream.peer_addr()?;
log::debug!("Connected to server at {address:?}");
return Ok((stream, address));
}
Err(e) => {
log::error!("Could not connect to server, error: {}", e);
sleep(Duration::from_secs(2)).await;
}
}
}
Result::Err(crate::Error::GenericError(
"Server could not be connected".into(),
))
}
pub async fn connect_to_server_and_authenticate(
server_addresses: &[SocketAddr],
secret_key: &Option<Arc<SecretKey>>,
) -> crate::Result<ConnectionDescriptor> {
let (stream, address) = connect_to_server(server_addresses).await?;
let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split();
let (sealer, opener) = do_authentication(
0,
"worker".to_string(),
"server".to_string(),
secret_key.clone(),
&mut writer,
&mut reader,
)
.await?;
Ok(ConnectionDescriptor {
address,
receiver: reader,
sender: writer,
sealer,
opener,
})
}
// Maximum time to wait for running tasks to shut down when the worker ends.
const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5);
/// Connects to the server and starts a message receiving loop.
/// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified.
pub async fn run_worker(
scheduler_addresses: &[SocketAddr],
mut configuration: WorkerConfiguration,
secret_key: Option<Arc<SecretKey>>,
launcher_setup: Box<dyn TaskLauncher>,
stop_flag: Arc<Notify>,
) -> crate::Result<(
(WorkerId, WorkerConfiguration),
impl Future<Output = crate::Result<()>>,
)> {
let (_listener, port) = start_listener().await?;
configuration.listen_address = format!("{}:{}", configuration.hostname, port);
let ConnectionDescriptor {
mut sender,
mut receiver,
mut opener,
mut sealer,
..
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?;
{
let message = ConnectionRegistration::Worker(RegisterWorker {
configuration: configuration.clone(),
});
let data = serialize(&message)?.into();
sender.send(seal_message(&mut sealer, data)).await?;
}
let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>();
let heartbeat_interval = configuration.heartbeat_interval;
let overview_configuration = configuration.overview_configuration.clone();
let time_limit = configuration.time_limit;
let (worker_id, state, start_task_notify) = {
match timeout(Duration::from_secs(15), receiver.next()).await {
Ok(Some(data)) => {
let WorkerRegistrationResponse {
worker_id,
other_workers,
resource_names,
server_idle_timeout,
server_uid,
} = open_message(&mut opener, &data?)?;
sync_worker_configuration(&mut configuration, server_idle_timeout);
let start_task_notify = Rc::new(Notify::new());
let comm = WorkerComm::new(queue_sender, start_task_notify.clone());
let state_ref = WorkerStateRef::new(
comm,
worker_id,
configuration.clone(),
secret_key,
ResourceMap::from_vec(resource_names),
launcher_setup,
server_uid,
);
{
let mut state = state_ref.get_mut();
for worker_info in other_workers {
state.new_worker(worker_info);
}
}
(worker_id, state_ref, start_task_notify)
}
Ok(None) => panic!("Connection closed without receiving registration response"),
Err(_) => panic!("Did not receive worker registration response"),
}
};
let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone());
let idle_timeout_fut = match configuration.idle_timeout {
Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())),
None => Either::Right(futures::future::pending()),
};
let overview_fut = match overview_configuration {
None => Either::Left(futures::future::pending()),
Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)),
};
let time_limit_fut = match time_limit {
None => Either::Left(futures::future::pending::<()>()),
Some(d) => Either::Right(tokio::time::sleep(d)),
};
let future = async move {
let try_start_tasks = task_starter_process(state.clone(), start_task_notify);
let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer);
tokio::pin! {
let send_loop = send_loop;
let try_start_tasks = try_start_tasks;
}
let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! {
r = worker_message_loop(state.clone(), receiver, opener) => {
log::debug!("Server read connection has disconnected");
r.map(|_| None)
}
r = &mut send_loop => {
log::debug!("Server write connection has disconnected");
r.map_err(|e| e.into()).map(|_| None)
},
_ = time_limit_fut => {
log::info!("Time limit reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached)))
}
_ = idle_timeout_fut => {
log::info!("Idle timeout reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout)))
}
_ = stop_flag.notified() => {
log::info!("Worker received an external stop notification");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted)))
}
_ = &mut try_start_tasks => { unreachable!() }
_ = heartbeat_fut => { unreachable!() }
_ = overview_fut => { unreachable!() }
};
// Handle sending stop info to the server and finishing running tasks gracefully.
let result = match result {
Ok(Some(msg)) => {
// Worker wants to end gracefully, send message to the server
{
state.get_mut().comm().send_message_to_server(msg);
state.get_mut().comm().drop_sender();
}
send_loop.await?;
Ok(())
}
Ok(None) => {
// Graceful shutdown from server
Ok(())
}
Err(e) => {
// Server has disconnected
tokio::select! {
_ = &mut try_start_tasks => { unreachable!() }
r = finish_tasks_on_server_lost(state.clone()) => r
}
Err(e)
}
};
// At this point, there can still be some tasks that are running.
// We cancel them here to make sure that we do not leak their spawned processes, if possible.
// The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local,
// therefore we do not need to await any specific future to drive them forward.
// try_start_tasks is not being polled, therefore no new tasks should be started.
cancel_running_tasks_on_worker_end(state).await;
result
};
// Provide a local task set for spawning futures
let future = async move {
let set = tokio::task::LocalSet::new();
set.run_until(future).await
};
Ok(((worker_id, configuration), future))
}
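/// Applies the configured `ServerLostPolicy` after the server connection is lost: either stops immediately or waits for the currently running tasks to finish.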
async fn finish_tasks_on_server_lost(state: WorkerStateRef) {
let on_server_lost = state.get().configuration.on_server_lost.clone();
match on_server_lost {
ServerLostPolicy::Stop => {}
ServerLostPolicy::FinishRunning => {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
if !state.is_empty() {
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
Some(notify)
} else {
None
}
};
if let Some(notify) = notify {
log::info!("Waiting for finishing running tasks");
notify.notified().await;
log::info!("All running tasks were finished");
} else {
log::info!("No running tasks remain")
}
}
}
}
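/// Cancels all remaining running tasks and waits up to `MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN` for them to stop.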
async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
for task in state.running_tasks.clone() {
state.cancel_task(task);
}
if state.running_tasks.is_empty() {
return;
}
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
notify
};
log::info!("Waiting for stopping running tasks");
match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await {
Ok(_) => {
log::info!("All running tasks were stopped");
}
Err(_) => {
log::info!("Timed out while waiting for running tasks to stop");
}
}
}
/// Tries to start tasks after a new task appears or some task finishes.
async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) {
loop {
notify.notified().await;
let mut state = state_ref.get_mut();
state.start_task_scheduled = false;
let remaining_time = if let Some(limit) = state.configuration.time_limit {
let life_time = std::time::Instant::now() - state.start_time;
if life_time >= limit {
log::debug!("Trying to start a task after time limit");
break;
}
Some(limit - life_time)
} else {
None
};
loop {
let (task_map, ready_task_queue) = state.borrow_tasks_and_queue();
let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time);
if allocations.is_empty() {
break;
}
for (task_id, allocation, resource_index) in allocations {
run_task(&mut state, &state_ref, task_id, allocation, resource_index);
}
}
}
}
/// Repeatedly sends a heartbeat message to the server.
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(heartbeat_interval);
loop {
interval.tick().await;
state_ref
.get_mut()
.comm()
.send_message_to_server(FromWorkerMessage::Heartbeat);
log::debug!("Heartbeat sent");
}
}
/// Runs until an idle timeout happens.
/// Idle timeout occurs when the worker doesn't have anything to do for the specified duration.
async fn idle_timeout_process(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(Duration::from_secs(1));
loop {
interval.tick().await;
let state = state_ref.get();
if !state.has_tasks() && !state.reservation {
let elapsed = state.last_task_finish_time.elapsed();
if elapsed > idle_timeout {
break;
}
}
}
}
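/// Handles a single message received from the server. Returns `true` if the worker should stop.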
pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool {
match message {
ToWorkerMessage::ComputeTask(msg) => {
log::debug!("Task assigned: {}", msg.id);
let task = Task::new(msg);
state.add_task(task);
}
ToWorkerMessage::StealTasks(msg) => {
log::debug!("Steal {} attempts", msg.ids.len());
let responses: Vec<_> = msg
.ids
.iter()
.map(|task_id| {
let response = state.steal_task(*task_id);
log::debug!("Steal attempt: {}, response {:?}", task_id, response);
(*task_id, response)
})
.collect();
let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses });
state.comm().send_message_to_server(message);
}
ToWorkerMessage::CancelTasks(msg) => {
for task_id in msg.ids {
state.cancel_task(task_id);
}
}
ToWorkerMessage::NewWorker(msg) => {
state.new_worker(msg);
}
ToWorkerMessage::LostWorker(worker_id) => {
state.remove_worker(worker_id);
}
ToWorkerMessage::SetReservation(on_off) => {
state.reservation = on_off;
if !on_off |
}
ToWorkerMessage::Stop => {
log::info!("Received stop command");
return true;
}
}
false
}
/// Runs while messages are coming from the server; returns when a stop command arrives.
async fn worker_message_loop(
state_ref: WorkerStateRef,
mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin,
mut opener: Option<StreamOpener>,
) -> crate::Result<()> {
while let Some(data) = stream.next().await {
let data = data?;
let message: ToWorkerMessage = open_message(&mut opener, &data)?;
let mut state = state_ref.get_mut();
if process_worker_message(&mut state, message) {
return Ok(());
}
}
log::debug!("Connection to server is closed");
Err("Server connection closed".into())
}
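/// Periodically samples the hardware state on a background thread and sends a `WorkerOverview` with the currently running tasks and the HW state to the server.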
async fn send_overview_loop(
state_ref: WorkerStateRef,
configuration: OverviewConfiguration,
) -> crate::Result<()> {
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
let OverviewConfiguration {
send_interval,
gpu_families,
} = configuration;
// Fetching the HW state performs blocking I/O, therefore we do it on a dedicated thread.
// tokio::task::spawn_blocking is not used because it would need mutable access to the sampler,
// which we do not want to recreate for every sample.
std::thread::spawn(move || -> crate::Result<()> {
let mut sampler = HwSampler::init(gpu_families)?;
loop {
std::thread::sleep(send_interval);
let hw_state = sampler.fetch_hw_state()?;
if let Err(error) = tx.blocking_send(hw_state) {
log::error!("Cannot send HW state to overview loop: {error:?}");
break;
}
}
Ok(())
});
let mut poll_interval = tokio::time::interval(send_interval);
loop {
poll_interval.tick().await;
if let Some(hw_state) = rx.recv().await {
let mut worker_state = state_ref.get_mut();
let message = FromWorkerMessage::Overview(WorkerOverview {
id: worker_state.worker_id,
running_tasks: worker_state
.running_tasks
.iter()
.map(|&task_id| {
let task = worker_state.get_task(task_id);
let allocation: &Allocation = task.resource_allocation().unwrap();
(
task_id,
resource_allocation_to_msg(allocation, worker_state.get_resource_map()),
)
// TODO: Modify this when more CPUs are allowed
})
.collect(),
hw_state: Some(WorkerHwStateMessage { state: hw_state }),
});
worker_state.comm().send_message_to_server(message);
}
}
}
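/// Converts an internal `Allocation` into its message representation, mapping resource ids back to resource names.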
fn resource_allocation_to_msg(
allocation: &Allocation,
resource_map: &ResourceMap,
) -> TaskResourceAllocation {
TaskResourceAllocation {
resources: allocation
.resources
.iter()
.map(
|alloc| crate::internal::messages::worker::ResourceAllocation {
resource: resource_map
.get_name(alloc.resource)
.unwrap_or("unknown")
.to_string(),
value: match &alloc.value {
AllocationValue::Indices(indices) => {
TaskResourceAllocationValue::Indices(indices.iter().cloned().collect())
}
AllocationValue::Sum(amount) => TaskResourceAllocationValue::Sum(*amount),
},
},
)
.collect(),
}
}
| {
state.reset_idle_timer();
} | conditional_block |
rpc.rs | use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr};
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use bytes::{Bytes, BytesMut};
use futures::{SinkExt, Stream, StreamExt};
use orion::aead::streaming::StreamOpener;
use orion::aead::SecretKey;
use tokio::net::{TcpListener, TcpStream};
use tokio::time::sleep;
use tokio::time::timeout;
use crate::comm::{ConnectionRegistration, RegisterWorker};
use crate::hwstats::WorkerHwStateMessage;
use crate::internal::common::resources::map::ResourceMap;
use crate::internal::common::resources::{Allocation, AllocationValue};
use crate::internal::common::WrappedRcRefCell;
use crate::internal::messages::worker::{
FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue,
ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason,
};
use crate::internal::server::rpc::ConnectionDescriptor;
use crate::internal::transfer::auth::{
do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize,
};
use crate::internal::transfer::transport::make_protocol_builder;
use crate::internal::worker::comm::WorkerComm;
use crate::internal::worker::configuration::{
sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration,
};
use crate::internal::worker::hwmonitor::HwSampler;
use crate::internal::worker::reactor::run_task;
use crate::internal::worker::state::{WorkerState, WorkerStateRef};
use crate::internal::worker::task::Task;
use crate::launcher::TaskLauncher;
use crate::WorkerId;
use futures::future::Either;
use tokio::sync::Notify;
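/// Binds a TCP listener to an OS-assigned port on all interfaces and returns it together with the chosen port.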
async fn start_listener() -> crate::Result<(TcpListener, u16)> {
let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0);
let listener = TcpListener::bind(address).await?;
let port = {
let socketaddr = listener.local_addr()?;
socketaddr.port()
};
log::info!("Listening on port {}", port);
Ok((listener, port))
}
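/// Attempts to connect to one of the given server addresses, retrying up to 20 times with a two-second pause between failed attempts.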
async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> {
log::info!(
"Connecting to server (candidate addresses = {:?})",
addresses
);
let max_attempts = 20;
for _ in 0..max_attempts {
match TcpStream::connect(addresses).await {
Ok(stream) => {
let address = stream.peer_addr()?;
log::debug!("Connected to server at {address:?}");
return Ok((stream, address));
}
Err(e) => {
log::error!("Could not connect to server, error: {}", e);
sleep(Duration::from_secs(2)).await;
}
}
}
Result::Err(crate::Error::GenericError(
"Server could not be connected".into(),
))
}
pub async fn connect_to_server_and_authenticate(
server_addresses: &[SocketAddr],
secret_key: &Option<Arc<SecretKey>>,
) -> crate::Result<ConnectionDescriptor> {
let (stream, address) = connect_to_server(server_addresses).await?;
let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split();
let (sealer, opener) = do_authentication(
0,
"worker".to_string(),
"server".to_string(),
secret_key.clone(),
&mut writer,
&mut reader,
)
.await?;
Ok(ConnectionDescriptor {
address,
receiver: reader,
sender: writer,
sealer,
opener,
})
}
// Maximum time to wait for running tasks to shut down when the worker ends.
const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5);
/// Connects to the server and starts a message receiving loop.
/// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified.
pub async fn run_worker(
scheduler_addresses: &[SocketAddr],
mut configuration: WorkerConfiguration,
secret_key: Option<Arc<SecretKey>>,
launcher_setup: Box<dyn TaskLauncher>,
stop_flag: Arc<Notify>,
) -> crate::Result<(
(WorkerId, WorkerConfiguration),
impl Future<Output = crate::Result<()>>,
)> {
let (_listener, port) = start_listener().await?;
configuration.listen_address = format!("{}:{}", configuration.hostname, port);
let ConnectionDescriptor {
mut sender,
mut receiver,
mut opener,
mut sealer,
..
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?;
{
let message = ConnectionRegistration::Worker(RegisterWorker {
configuration: configuration.clone(),
});
let data = serialize(&message)?.into();
sender.send(seal_message(&mut sealer, data)).await?;
}
let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>();
let heartbeat_interval = configuration.heartbeat_interval;
let overview_configuration = configuration.overview_configuration.clone();
let time_limit = configuration.time_limit;
let (worker_id, state, start_task_notify) = {
match timeout(Duration::from_secs(15), receiver.next()).await {
Ok(Some(data)) => {
let WorkerRegistrationResponse {
worker_id,
other_workers,
resource_names,
server_idle_timeout,
server_uid,
} = open_message(&mut opener, &data?)?;
sync_worker_configuration(&mut configuration, server_idle_timeout);
let start_task_notify = Rc::new(Notify::new());
let comm = WorkerComm::new(queue_sender, start_task_notify.clone());
let state_ref = WorkerStateRef::new(
comm,
worker_id,
configuration.clone(),
secret_key,
ResourceMap::from_vec(resource_names),
launcher_setup,
server_uid,
);
{
let mut state = state_ref.get_mut();
for worker_info in other_workers {
state.new_worker(worker_info);
}
}
(worker_id, state_ref, start_task_notify)
}
Ok(None) => panic!("Connection closed without receiving registration response"),
Err(_) => panic!("Did not receive worker registration response"),
}
};
let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone());
let idle_timeout_fut = match configuration.idle_timeout {
Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())),
None => Either::Right(futures::future::pending()),
};
let overview_fut = match overview_configuration {
None => Either::Left(futures::future::pending()),
Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)),
};
let time_limit_fut = match time_limit {
None => Either::Left(futures::future::pending::<()>()),
Some(d) => Either::Right(tokio::time::sleep(d)),
};
let future = async move {
let try_start_tasks = task_starter_process(state.clone(), start_task_notify);
let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer);
tokio::pin! {
let send_loop = send_loop;
let try_start_tasks = try_start_tasks;
}
let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! {
r = worker_message_loop(state.clone(), receiver, opener) => {
log::debug!("Server read connection has disconnected");
r.map(|_| None)
}
r = &mut send_loop => {
log::debug!("Server write connection has disconnected");
r.map_err(|e| e.into()).map(|_| None)
},
_ = time_limit_fut => {
log::info!("Time limit reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached)))
}
_ = idle_timeout_fut => {
log::info!("Idle timeout reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout)))
}
_ = stop_flag.notified() => {
log::info!("Worker received an external stop notification");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted)))
}
_ = &mut try_start_tasks => { unreachable!() }
_ = heartbeat_fut => { unreachable!() }
_ = overview_fut => { unreachable!() }
};
// Handle sending stop info to the server and finishing running tasks gracefully.
let result = match result {
Ok(Some(msg)) => {
// Worker wants to end gracefully, send message to the server
{
state.get_mut().comm().send_message_to_server(msg);
state.get_mut().comm().drop_sender();
}
send_loop.await?;
Ok(())
}
Ok(None) => {
// Graceful shutdown from server
Ok(())
}
Err(e) => {
// Server has disconnected
tokio::select! {
_ = &mut try_start_tasks => { unreachable!() }
r = finish_tasks_on_server_lost(state.clone()) => r
}
Err(e)
}
};
// At this point, there can still be some tasks that are running.
// We cancel them here to make sure that we do not leak their spawned processes, if possible.
// The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local,
// therefore we do not need to await any specific future to drive them forward.
// try_start_tasks is not being polled, therefore no new tasks should be started.
cancel_running_tasks_on_worker_end(state).await;
result
};
// Provide a local task set for spawning futures
let future = async move {
let set = tokio::task::LocalSet::new();
set.run_until(future).await
};
Ok(((worker_id, configuration), future))
}
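/// Applies the configured `ServerLostPolicy` after the server connection is lost: either stops immediately or waits for the currently running tasks to finish.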
async fn finish_tasks_on_server_lost(state: WorkerStateRef) {
let on_server_lost = state.get().configuration.on_server_lost.clone();
match on_server_lost {
ServerLostPolicy::Stop => {}
ServerLostPolicy::FinishRunning => {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
if !state.is_empty() {
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
Some(notify)
} else {
None
}
};
if let Some(notify) = notify {
log::info!("Waiting for finishing running tasks");
notify.notified().await;
log::info!("All running tasks were finished");
} else {
log::info!("No running tasks remain")
}
}
}
}
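/// Cancels all remaining running tasks and waits up to `MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN` for them to stop.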
async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
for task in state.running_tasks.clone() {
state.cancel_task(task);
}
if state.running_tasks.is_empty() {
return;
}
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
notify
};
log::info!("Waiting for stopping running tasks");
match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await {
Ok(_) => {
log::info!("All running tasks were stopped");
}
Err(_) => {
log::info!("Timed out while waiting for running tasks to stop");
}
}
}
/// Tries to start tasks after a new task appears or some task finishes.
async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) {
loop {
notify.notified().await;
let mut state = state_ref.get_mut();
state.start_task_scheduled = false;
let remaining_time = if let Some(limit) = state.configuration.time_limit {
let life_time = std::time::Instant::now() - state.start_time;
if life_time >= limit {
log::debug!("Trying to start a task after time limit");
break;
}
Some(limit - life_time)
} else {
None
};
loop {
let (task_map, ready_task_queue) = state.borrow_tasks_and_queue();
let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time);
if allocations.is_empty() {
break;
}
for (task_id, allocation, resource_index) in allocations {
run_task(&mut state, &state_ref, task_id, allocation, resource_index);
}
}
}
}
/// Repeatedly sends a heartbeat message to the server.
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(heartbeat_interval);
loop {
interval.tick().await;
state_ref
.get_mut()
.comm()
.send_message_to_server(FromWorkerMessage::Heartbeat);
log::debug!("Heartbeat sent");
}
}
/// Runs until an idle timeout happens.
/// Idle timeout occurs when the worker doesn't have anything to do for the specified duration.
async fn | (idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(Duration::from_secs(1));
loop {
interval.tick().await;
let state = state_ref.get();
if !state.has_tasks() && !state.reservation {
let elapsed = state.last_task_finish_time.elapsed();
if elapsed > idle_timeout {
break;
}
}
}
}
pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool {
match message {
ToWorkerMessage::ComputeTask(msg) => {
log::debug!("Task assigned: {}", msg.id);
let task = Task::new(msg);
state.add_task(task);
}
ToWorkerMessage::StealTasks(msg) => {
log::debug!("Steal {} attempts", msg.ids.len());
let responses: Vec<_> = msg
.ids
.iter()
.map(|task_id| {
let response = state.steal_task(*task_id);
log::debug!("Steal attempt: {}, response {:?}", task_id, response);
(*task_id, response)
})
.collect();
let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses });
state.comm().send_message_to_server(message);
}
ToWorkerMessage::CancelTasks(msg) => {
for task_id in msg.ids {
state.cancel_task(task_id);
}
}
ToWorkerMessage::NewWorker(msg) => {
state.new_worker(msg);
}
ToWorkerMessage::LostWorker(worker_id) => {
state.remove_worker(worker_id);
}
ToWorkerMessage::SetReservation(on_off) => {
state.reservation = on_off;
if !on_off {
state.reset_idle_timer();
}
}
ToWorkerMessage::Stop => {
log::info!("Received stop command");
return true;
}
}
false
}
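// The boolean result doubles as a stop signal: only ToWorkerMessage::Stop
// returns true, which worker_message_loop below turns into a clean Ok(())
// shutdown instead of a connection error.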
/// Runs for as long as messages keep arriving from the server.
async fn worker_message_loop(
state_ref: WorkerStateRef,
mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin,
mut opener: Option<StreamOpener>,
) -> crate::Result<()> {
while let Some(data) = stream.next().await {
let data = data?;
let message: ToWorkerMessage = open_message(&mut opener, &data)?;
let mut state = state_ref.get_mut();
if process_worker_message(&mut state, message) {
return Ok(());
}
}
log::debug!("Connection to server is closed");
Err("Server connection closed".into())
}
async fn send_overview_loop(
state_ref: WorkerStateRef,
configuration: OverviewConfiguration,
) -> crate::Result<()> {
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
let OverviewConfiguration {
send_interval,
gpu_families,
} = configuration;
// Fetching the HW state performs blocking I/O, therefore we should do it in a separate thread.
// tokio::task::spawn_blocking is not used because it would need mutable access to a sampler,
// which should not be re-created for every sample.
std::thread::spawn(move || -> crate::Result<()> {
let mut sampler = HwSampler::init(gpu_families)?;
loop {
std::thread::sleep(send_interval);
let hw_state = sampler.fetch_hw_state()?;
if let Err(error) = tx.blocking_send(hw_state) {
log::error!("Cannot send HW state to overview loop: {error:?}");
break;
}
}
Ok(())
});
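// For contrast, a spawn_blocking-based variant (sketch only, not used here)
// would have to shuttle the sampler through every call to keep reusing it:
// let (sampler, hw_state) = tokio::task::spawn_blocking(move || {
//     let state = sampler.fetch_hw_state();
//     (sampler, state)
// })
// .await?;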
let mut poll_interval = tokio::time::interval(send_interval);
loop {
poll_interval.tick().await;
if let Some(hw_state) = rx.recv().await {
let mut worker_state = state_ref.get_mut();
let message = FromWorkerMessage::Overview(WorkerOverview {
id: worker_state.worker_id,
running_tasks: worker_state
.running_tasks
.iter()
.map(|&task_id| {
let task = worker_state.get_task(task_id);
let allocation: &Allocation = task.resource_allocation().unwrap();
(
task_id,
resource_allocation_to_msg(allocation, worker_state.get_resource_map()),
)
// TODO: Modify this when more cpus are allowed
})
.collect(),
hw_state: Some(WorkerHwStateMessage { state: hw_state }),
});
worker_state.comm().send_message_to_server(message);
}
}
}
fn resource_allocation_to_msg(
allocation: &Allocation,
resource_map: &ResourceMap,
) -> TaskResourceAllocation {
TaskResourceAllocation {
resources: allocation
.resources
.iter()
.map(
|alloc| crate::internal::messages::worker::ResourceAllocation {
resource: resource_map
.get_name(alloc.resource)
.unwrap_or("unknown")
.to_string(),
value: match &alloc.value {
AllocationValue::Indices(indices) => {
TaskResourceAllocationValue::Indices(indices.iter().cloned().collect())
}
AllocationValue::Sum(amount) => TaskResourceAllocationValue::Sum(*amount),
},
},
)
.collect(),
}
}
| idle_timeout_process | identifier_name |
mopac_qr.py | """
Version 2012/08/20, Torsten Kerber
Contributors:
Torsten Kerber, Ecole normale superieure de Lyon:
Paul Fleurat-Lessard, Ecole normale superieure de Lyon
based on a script by Rosa Bulo, Ecole normale superieure de Lyon
This work is supported by Award No. UK-C0017, made by King Abdullah
University of Science and Technology (KAUST), Saudi Arabia
See accompanying license files for details.
"""
import os
import string
import numpy as np
import platform
from ase.units import kcal, mol
from ase.calculators.general import Calculator
str_keys = ['functional', 'job_type']
int_keys = ['restart', 'spin', 'charge','nproc']
bool_keys = ['OPT']
float_keys = ['RELSCF']
class Mopac(Calculator):
name = 'MOPAC'
def __init__(self,
label='ase',
**kwargs):
# define parameter fields
self.str_params = {}
self.int_params = {}
self.bool_params = {}
self.float_params = {}
# initialize parameter fields
for key in str_keys:
self.str_params[key] = None
for key in int_keys:
self.int_params[key] = None
for key in bool_keys:
self.bool_params[key] = None
for key in float_keys:
self.float_params[key] = None
# set initial values
functional = 'PM7'
env = os.environ.get('MOPAC_FUNCTIONAL', None)
if env: functional = env
self.set(restart=0,
spin=0,
OPT=False,
functional=functional,
job_type=' 1SCF GRADIENTS AUX(0,PRECISION=9) ',
RELSCF= None)
# set user values
self.set(**kwargs)
# save label
self.label = label
#set atoms
self.atoms = None
# initialize the results
self.version = None
self.energy_zero = None
self.energy_free = None
self.forces = None
self.stress = None
self.calc_dir = None
# initialize the occupations
self.occupations = None
# command
self.command = self.get_command()
def set(self, **kwargs):
"""
Sets the parameters on the according keywords
Raises RuntimeError when wrong keyword is provided
"""
for key in kwargs:
if key in self.bool_params:
self.bool_params[key] = kwargs[key]
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
else:
raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)
def get_version(self):
return self.version
def initialize(self, atoms):
pass
def write_input(self, fname, atoms):
"""
Writes the files that have to be written each timestep
"""
# start the input
mopac_input = ''
#write functional and job_type
for key in 'functional', 'job_type':
if self.str_params[key] != None:
mopac_input += self.str_params[key] + ' '
if self.float_params['RELSCF'] != None:
mopac_input += 'RELSCF=' + str(self.float_params['RELSCF']) + ' '
# write charge
# charge = sum(atoms.get_initial_charges())
#if charge != 0:
# mopac_input += 'CHARGE=%i ' % (charge)
charge=self.int_params['charge']
mopac_input += 'CHARGE= ' + str(charge)+' '
if (self.int_params['nproc'] > 1):
nproc=self.int_params['nproc']
else:
nproc=1
# threads should be specified by user
mopac_input += ' THREADS=%i' %(nproc)
# add solvent
mopac_input += ' EPS=78.4'
#write spin
spin = self.int_params['spin']
if spin == 1.:
mopac_input += 'DOUBLET '
elif spin == 2.:
mopac_input += 'TRIPLET '
#input down
mopac_input += '\n'
mopac_input += 'Title: ASE job\n\n'
f = 1
# write coordinates
for iat in range(len(atoms)):
atom = atoms[iat]
xyz = atom.position
mopac_input += ' %2s' % atom.symbol
# write x, y, z
for idir in range(3):
mopac_input += ' %16.5f %i' % (xyz[idir], f)
mopac_input += '\n'
if atoms.pbc.any():
for v in atoms.get_cell():
mopac_input += 'Tv %8.3f %8.3f %8.3f\n' % (v[0], v[1], v[2])
# write input
myfile = open(fname, 'w')
myfile.write(mopac_input)
myfile.close()
self.mopac_input = mopac_input
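# A generated deck looks roughly like this (hypothetical water molecule, all
# other values taken from the defaults above):
#   PM7  1SCF GRADIENTS AUX(0,PRECISION=9)  CHARGE= 0  THREADS=1 EPS=78.4
#   Title: ASE job
#
#    O          0.00000 1          0.00000 1          0.11926 1
#    H          0.00000 1          0.76324 1         -0.47704 1
#    H          0.00000 1         -0.76324 1         -0.47704 1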
def get_command(self):
"""Return command string if program installed, otherwise None. """
command = None
if ('MOPAC_COMMAND' in os.environ):
command = os.environ['MOPAC_COMMAND']
return command
def set_command(self, command):
self.command = command
def | (self,command):
"""
execute <command> in a subprocess and check error code
"""
from subprocess import Popen, PIPE, STDOUT
if command == '':
raise RuntimeError('no command for run_command :(')
# print 'Running: ', command #debug
proc = Popen([command], shell=True, stderr=PIPE)
proc.wait()
exitcode = proc.returncode
if exitcode != 0:
# print exitcode,'label:', self.calc_dir
error='%s exited with error code %i in %s' % (
command,exitcode,self.calc_dir)
stdout,stderr = proc.communicate()
print 'shell output: ',stdout,stderr
raise RuntimeError(error)
return 0
def run(self):
"""
Writes input in label.mop
Runs MOPAC
Reads Version, Energy and Forces
"""
import subprocess, shlex
from threading import Timer
def run_timeout(cmd, timeout_sec):
proc = subprocess.Popen(shlex.split(cmd),
# proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
shell=True,
stderr=subprocess.PIPE)
kill_proc = lambda p: p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
try:
timer.start()
stdout,stderr = proc.communicate()
print stdout,stderr
finally:
timer.cancel()
# set the input file name
finput = self.label + '.mop'
foutput = self.label + '.out'
self.write_input(finput, self.atoms)
# directory
self.calc_dir = os.getcwd()
command = self.command
if command is None:
raise RuntimeError('MOPAC_COMMAND is not specified')
WhatOS=platform.system()
if "Linux" in WhatOS:
if ('MOPAC_DIR' in os.environ):
mdir = os.environ['MOPAC_DIR']
else:
raise RuntimeError('MOPAC_DIR is not specified')
command_exc= "LD_PRELOAD=%s/libiomp5.so %s %s" % (mdir,command,finput)
if "Darwin" in WhatOS:
command_exc= " ".join([command , finput])
# run_timeout(command_exc ,72000)# 20hours
self.run_command(command_exc)
# exitcode = os.system('%s %s' % (command, finput)+ ' > /dev/null 2>&1 ')
# if exitcode != 0:
# raise RuntimeError('MOPAC exited with error code')
self.version = self.read_version(foutput)
energy = self.read_energy(foutput)
self.energy_zero = energy
self.energy_free = energy
self.forces = self.read_forces(foutput)
def read_version(self, fname):
"""
Reads the MOPAC version string from the output file
"""
version = 'unknown'
lines = open(fname).readlines()
for line in lines:
if " Version" in line:
version = line.split()[-2]
break
return version
def read_energy(self, fname):
"""
Reads the ENERGY from the output file (HEAT of FORMATION in kcal / mol)
Raises RuntimeError if no energy was found
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
energy = None
for line in lines:
if line.find('HEAT OF FORMATION') != -1:
words = line.split()
energy = float(words[5])
if line.find('H.o.F. per unit cell') != -1:
words = line.split()
energy = float(words[5])
if line.find('UNABLE TO ACHIEVE SELF-CONSISTENCE') != -1:
energy = None
if energy is None:
raise RuntimeError('MOPAC: could not find total energy')
### do not change unit for mopac
energy *= (kcal / mol)
return energy
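# Unit note: in ase.units, kcal / mol evaluates to roughly 0.04336 eV, so the
# multiplication above converts MOPAC's heat of formation from kcal/mol into
# ASE's native eV (e.g. -23.0 kcal/mol -> about -0.997 eV).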
def read_forces(self, fname):
"""
Reads the FORCES from the output file
units: kcal / mol / Angstrom (converted to ASE eV / Angstrom below)
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
nats = len(self.atoms)
forces = np.zeros((nats, 3), float)
infinite_force="*****"
if 'mozyme' in self.str_params['job_type'].lower():
for i, line in enumerate(lines):
if line.find('FINAL POINT AND DERIVATIVES') != -1:
for j in range(nats):
gline = lines[i + j + 5]
pre_force=gline[8:35]
if(infinite_force in pre_force):
forces[j] = [999999999.9999,999999999.9999,999999999.9999]
else:
forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]
else:
for i, line in enumerate(lines):
if line.find('GRADIENT\n') != -1:
for j in range(nats * 3):
gline = lines[i + j + 1]
pre_force=gline[49:62]
if(infinite_force in pre_force):
forces[int(j/3), int(j%3)] =999999999.9999
else:
forces[int(j/3), int(j%3)] = float(pre_force)
break
#do not change unit for mopac
forces *= - (kcal / mol)
return forces
def atoms_are_equal(self, atoms_new):
''' (adopted from jacapo.py)
comparison of atoms to self.atoms using tolerances to account
for float/double differences and float math.
'''
TOL = 1.0e-6 # angstroms
# check for change in cell parameters
test = len(atoms_new) == len(self.atoms)
if test is not True:
return False
# check for change in cell parameters
test = (abs(self.atoms.get_cell() - atoms_new.get_cell()) <= TOL).all()
if test is not True:
return False
old = self.atoms.arrays
new = atoms_new.arrays
# check for change in atom position
test = (abs(new['positions'] - old['positions']) <= TOL).all()
if test is not True:
return False
# passed all tests
return True
def update(self, atoms_new, **kwargs):
self.set(**kwargs)
if not self.atoms_are_equal(atoms_new):
self.atoms = atoms_new.copy()
self.run()
def run_qr(self, atoms_new, **kwargs):
for key in kwargs:
if key in self.bool_params:
self.bool_params[key] = kwargs[key]
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
self.atoms = atoms_new.copy()
self.run()
# Q|R requirements
def set_charge(self, charge):
self.int_params['charge'] = charge
def set_method(self, method):
self.str_params['functional'] = method
def set_label(self, label):
self.label = label
def set_nproc(self, nproc):
self.int_params['nproc'] = nproc
| run_command | identifier_name |
mopac_qr.py | """
Version 2012/08/20, Torsten Kerber
Contributors:
Torsten Kerber, Ecole normale superieure de Lyon:
Paul Fleurat-Lessard, Ecole normale superieure de Lyon
based on a script by Rosa Bulo, Ecole normale superieure de Lyon
This work is supported by Award No. UK-C0017, made by King Abdullah
University of Science and Technology (KAUST), Saudi Arabia
See accompanying license files for details.
"""
import os
import string
import numpy as np
import platform
from ase.units import kcal, mol
from ase.calculators.general import Calculator
str_keys = ['functional', 'job_type']
int_keys = ['restart', 'spin', 'charge','nproc']
bool_keys = ['OPT']
float_keys = ['RELSCF']
class Mopac(Calculator):
name = 'MOPAC'
def __init__(self,
label='ase',
**kwargs):
# define parameter fields
self.str_params = {}
self.int_params = {}
self.bool_params = {}
self.float_params = {}
# initialize parameter fields
for key in str_keys:
self.str_params[key] = None
for key in int_keys:
self.int_params[key] = None
for key in bool_keys:
self.bool_params[key] = None
for key in float_keys:
self.float_params[key] = None
# set initial values
functional = 'PM7'
env = os.environ.get('MOPAC_FUNCTIONAL', None)
if env: functional = env
self.set(restart=0,
spin=0,
OPT=False,
functional=functional,
job_type=' 1SCF GRADIENTS AUX(0,PRECISION=9) ',
RELSCF= None)
# set user values
self.set(**kwargs)
# save label
self.label = label
#set atoms
self.atoms = None
# initialize the results
self.version = None
self.energy_zero = None
self.energy_free = None
self.forces = None
self.stress = None
self.calc_dir = None
# initialize the occupations
self.occupations = None
# command
self.command = self.get_command()
def set(self, **kwargs):
"""
Sets the parameters on the according keywords
Raises RuntimeError when wrong keyword is provided
"""
for key in kwargs:
if key in self.bool_params:
self.bool_params[key] = kwargs[key]
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
else:
raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)
def get_version(self):
return self.version
def initialize(self, atoms):
pass
def write_input(self, fname, atoms):
"""
Writes the files that have to be written each timestep
"""
# start the input
mopac_input = ''
#write functional and job_type
for key in 'functional', 'job_type':
if self.str_params[key] != None:
mopac_input += self.str_params[key] + ' '
if self.float_params['RELSCF'] != None:
mopac_input += 'RELSCF=' + str(self.float_params['RELSCF']) + ' '
# write charge
# charge = sum(atoms.get_initial_charges())
#if charge != 0:
# mopac_input += 'CHARGE=%i ' % (charge)
charge=self.int_params['charge']
mopac_input += 'CHARGE= ' + str(charge)+' '
if (self.int_params['nproc'] > 1):
nproc=self.int_params['nproc']
else:
nproc=1
# threads should be specified by user
mopac_input += ' THREADS=%i' %(nproc)
# add solvent
mopac_input += ' EPS=78.4'
#write spin
spin = self.int_params['spin']
if spin == 1.:
mopac_input += 'DOUBLET '
elif spin == 2.:
mopac_input += 'TRIPLET '
#input down
mopac_input += '\n'
mopac_input += 'Title: ASE job\n\n'
f = 1
# write coordinates
for iat in range(len(atoms)):
atom = atoms[iat]
xyz = atom.position
mopac_input += ' %2s' % atom.symbol
# write x, y, z
for idir in range(3):
mopac_input += ' %16.5f %i' % (xyz[idir], f)
mopac_input += '\n'
if atoms.pbc.any():
for v in atoms.get_cell():
mopac_input += 'Tv %8.3f %8.3f %8.3f\n' % (v[0], v[1], v[2])
# write input
myfile = open(fname, 'w')
myfile.write(mopac_input)
myfile.close()
self.mopac_input = mopac_input
def get_command(self):
"""Return command string if program installed, otherwise None. """
command = None
if ('MOPAC_COMMAND' in os.environ):
command = os.environ['MOPAC_COMMAND']
return command
def set_command(self, command):
self.command = command
def run_command(self,command):
"""
execute <command> in a subprocess and check error code
"""
from subprocess import Popen, PIPE, STDOUT
if command == '':
raise RuntimeError('no command for run_command :(')
# print 'Running: ', command #debug
proc = Popen([command], shell=True, stderr=PIPE)
proc.wait()
exitcode = proc.returncode
if exitcode != 0:
# print exitcode,'label:', self.calc_dir
error='%s exited with error code %i in %s' % (
command,exitcode,self.calc_dir)
stdout,stderr = proc.communicate()
print 'shell output: ',stdout,stderr
raise RuntimeError(error)
return 0
def run(self):
"""
Writes input in label.mop
Runs MOPAC
Reads Version, Energy and Forces
"""
import subprocess, shlex
from threading import Timer
def run_timeout(cmd, timeout_sec):
proc = subprocess.Popen(shlex.split(cmd),
# proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
shell=True,
stderr=subprocess.PIPE)
kill_proc = lambda p: p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
try:
timer.start()
stdout,stderr = proc.communicate()
print stdout,stderr
finally:
timer.cancel()
# set the input file name
finput = self.label + '.mop'
foutput = self.label + '.out'
self.write_input(finput, self.atoms)
# directory
self.calc_dir = os.getcwd()
command = self.command
if command is None:
raise RuntimeError('MOPAC_COMMAND is not specified')
WhatOS=platform.system()
if "Linux" in WhatOS:
if ('MOPAC_DIR' in os.environ):
mdir = os.environ['MOPAC_DIR']
else:
raise RuntimeError('MOPAC_DIR is not specified')
command_exc= "LD_PRELOAD=%s/libiomp5.so %s %s" % (mdir,command,finput)
if "Darwin" in WhatOS:
command_exc= " ".join([command , finput])
# run_timeout(command_exc ,72000)# 20hours
self.run_command(command_exc)
# exitcode = os.system('%s %s' % (command, finput)+ ' > /dev/null 2>&1 ')
# if exitcode != 0:
# raise RuntimeError('MOPAC exited with error code')
self.version = self.read_version(foutput)
energy = self.read_energy(foutput)
self.energy_zero = energy
self.energy_free = energy
self.forces = self.read_forces(foutput)
def read_version(self, fname):
"""
Reads the MOPAC version string from the output file
"""
version = 'unknown'
lines = open(fname).readlines()
for line in lines:
if " Version" in line:
version = line.split()[-2]
break
return version
def read_energy(self, fname):
"""
Reads the ENERGY from the output file (HEAT of FORMATION in kcal / mol)
Raises RuntimeError if no energy was found
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
energy = None
for line in lines:
if line.find('HEAT OF FORMATION') != -1:
words = line.split()
energy = float(words[5])
if line.find('H.o.F. per unit cell') != -1:
words = line.split()
energy = float(words[5])
if line.find('UNABLE TO ACHIEVE SELF-CONSISTENCE') != -1:
energy = None
if energy is None:
raise RuntimeError('MOPAC: could not find total energy')
### do not change unit for mopac
energy *= (kcal / mol)
return energy
def read_forces(self, fname):
"""
Reads the FORCES from the output file
units: kcal / mol / Angstrom (converted to ASE eV / Angstrom below)
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
nats = len(self.atoms)
forces = np.zeros((nats, 3), float)
infinite_force="*****"
if 'mozyme' in self.str_params['job_type'].lower():
for i, line in enumerate(lines):
if line.find('FINAL POINT AND DERIVATIVES') != -1:
for j in range(nats):
gline = lines[i + j + 5]
pre_force=gline[8:35]
if(infinite_force in pre_force):
forces[j] = [999999999.9999,999999999.9999,999999999.9999]
else:
forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]
else:
for i, line in enumerate(lines):
if line.find('GRADIENT\n') != -1:
for j in range(nats * 3):
gline = lines[i + j + 1]
pre_force=gline[49:62]
if(infinite_force in pre_force):
forces[int(j/3), int(j%3)] =999999999.9999
else:
forces[int(j/3), int(j%3)] = float(pre_force)
break
#do not change unit for mopac
forces *= - (kcal / mol)
return forces
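# Parsing note: MOPAC prints '*****' in gradient fields that overflow the
# fixed-width format; those entries become the 999999999.9999 sentinel above.
# The final multiplication by -(kcal / mol) turns gradients into forces and
# converts them to ASE's eV / Angstrom.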
def atoms_are_equal(self, atoms_new):
''' (adopted from jacapo.py)
comparison of atoms to self.atoms using tolerances to account
for float/double differences and float math.
'''
TOL = 1.0e-6 # angstroms
# check for change in cell parameters
test = len(atoms_new) == len(self.atoms)
if test is not True:
return False
# check for change in cell parameters
test = (abs(self.atoms.get_cell() - atoms_new.get_cell()) <= TOL).all()
if test is not True:
return False
old = self.atoms.arrays
new = atoms_new.arrays
# check for change in atom position
test = (abs(new['positions'] - old['positions']) <= TOL).all()
if test is not True:
return False
# passed all tests
return True
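# Minimal usage sketch (hypothetical Atoms objects): a displacement above the
# 1.0e-6 A tolerance makes the comparison fail, so update() triggers a rerun:
#   calc.atoms = water.copy()
#   calc.atoms_are_equal(water)   # -> True
#   water.positions[0, 0] += 1.0e-3
#   calc.atoms_are_equal(water)   # -> False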
def update(self, atoms_new, **kwargs):
self.set(**kwargs)
if not self.atoms_are_equal(atoms_new): | for key in kwargs:
if key in self.bool_params:
self.bool_params[key] = kwargs[key]
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
self.atoms = atoms_new.copy()
self.run()
# Q|R requirements
def set_charge(self, charge):
self.int_params['charge'] = charge
def set_method(self, method):
self.str_params['functional'] = method
def set_label(self, label):
self.label = label
def set_nproc(self, nproc):
self.int_params['nproc'] = nproc | self.atoms = atoms_new.copy()
self.run()
def run_qr(self, atoms_new, **kwargs): | random_line_split |
mopac_qr.py | """
Version 2012/08/20, Torsten Kerber
Contributors:
Torsten Kerber, Ecole normale superieure de Lyon:
Paul Fleurat-Lessard, Ecole normale superieure de Lyon
based on a script by Rosa Bulo, Ecole normale superieure de Lyon
This work is supported by Award No. UK-C0017, made by King Abdullah
University of Science and Technology (KAUST), Saudi Arabia
See accompanying license files for details.
"""
import os
import string
import numpy as np
import platform
from ase.units import kcal, mol
from ase.calculators.general import Calculator
str_keys = ['functional', 'job_type']
int_keys = ['restart', 'spin', 'charge','nproc']
bool_keys = ['OPT']
float_keys = ['RELSCF']
class Mopac(Calculator):
name = 'MOPAC'
def __init__(self,
label='ase',
**kwargs):
# define parameter fields
self.str_params = {}
self.int_params = {}
self.bool_params = {}
self.float_params = {}
# initialize parameter fields
for key in str_keys:
self.str_params[key] = None
for key in int_keys:
self.int_params[key] = None
for key in bool_keys:
self.bool_params[key] = None
for key in float_keys:
self.float_params[key] = None
# set initial values
functional = 'PM7'
env = os.environ.get('MOPAC_FUNCTIONAL', None)
if env: functional = env
self.set(restart=0,
spin=0,
OPT=False,
functional=functional,
job_type=' 1SCF GRADIENTS AUX(0,PRECISION=9) ',
RELSCF= None)
# set user values
self.set(**kwargs)
# save label
self.label = label
#set atoms
self.atoms = None
# initialize the results
self.version = None
self.energy_zero = None
self.energy_free = None
self.forces = None
self.stress = None
self.calc_dir = None
# initialize the occupations
self.occupations = None
# command
self.command = self.get_command()
def set(self, **kwargs):
"""
Sets the parameters on the according keywords
Raises RuntimeError when wrong keyword is provided
"""
for key in kwargs:
if key in self.bool_params:
|
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
else:
raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)
def get_version(self):
return self.version
def initialize(self, atoms):
pass
def write_input(self, fname, atoms):
"""
Writes the files that have to be written each timestep
"""
# start the input
mopac_input = ''
#write functional and job_type
for key in 'functional', 'job_type':
if self.str_params[key] != None:
mopac_input += self.str_params[key] + ' '
if self.float_params['RELSCF'] != None:
mopac_input += 'RELSCF=' + str(self.float_params['RELSCF']) + ' '
# write charge
# charge = sum(atoms.get_initial_charges())
#if charge != 0:
# mopac_input += 'CHARGE=%i ' % (charge)
charge=self.int_params['charge']
mopac_input += 'CHARGE= ' + str(charge)+' '
if (self.int_params['nproc'] > 1):
nproc=self.int_params['nproc']
else:
nproc=1
# threads should be specified by user
mopac_input += ' THREADS=%i' %(nproc)
# add solvent
mopac_input += ' EPS=78.4'
#write spin
spin = self.int_params['spin']
if spin == 1.:
mopac_input += 'DOUBLET '
elif spin == 2.:
mopac_input += 'TRIPLET '
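# Here 'spin' counts unpaired electrons, so the multiplicity keyword is
# spin + 1 (1 -> DOUBLET, 2 -> TRIPLET); spin = 0 writes nothing and MOPAC
# defaults to a singlet.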
#input down
mopac_input += '\n'
mopac_input += 'Title: ASE job\n\n'
f = 1
# write coordinates
for iat in range(len(atoms)):
atom = atoms[iat]
xyz = atom.position
mopac_input += ' %2s' % atom.symbol
# write x, y, z
for idir in range(3):
mopac_input += ' %16.5f %i' % (xyz[idir], f)
mopac_input += '\n'
if atoms.pbc.any():
for v in atoms.get_cell():
mopac_input += 'Tv %8.3f %8.3f %8.3f\n' % (v[0], v[1], v[2])
# write input
myfile = open(fname, 'w')
myfile.write(mopac_input)
myfile.close()
self.mopac_input = mopac_input
def get_command(self):
"""Return command string if program installed, otherwise None. """
command = None
if ('MOPAC_COMMAND' in os.environ):
command = os.environ['MOPAC_COMMAND']
return command
def set_command(self, command):
self.command = command
def run_command(self,command):
"""
execute <command> in a subprocess and check error code
"""
from subprocess import Popen, PIPE, STDOUT
if command == '':
raise RuntimeError('no command for run_command :(')
# print 'Running: ', command #debug
proc = Popen([command], shell=True, stderr=PIPE)
proc.wait()
exitcode = proc.returncode
if exitcode != 0:
# print exitcode,'label:', self.calc_dir
error='%s exited with error code %i in %s' % (
command,exitcode,self.calc_dir)
stdout,stderr = proc.communicate()
print 'shell output: ',stdout,stderr
raise RuntimeError(error)
return 0
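# Usage sketch (hypothetical path): raises RuntimeError on a non-zero exit
# code and returns 0 otherwise:
#   self.run_command('/opt/mopac/MOPAC2016.exe ase.mop')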
def run(self):
"""
Writes input in label.mop
Runs MOPAC
Reads Version, Energy and Forces
"""
import subprocess, shlex
from threading import Timer
def run_timeout(cmd, timeout_sec):
proc = subprocess.Popen(shlex.split(cmd),
# proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
shell=True,
stderr=subprocess.PIPE)
kill_proc = lambda p: p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
try:
timer.start()
stdout,stderr = proc.communicate()
print stdout,stderr
finally:
timer.cancel()
# set the input file name
finput = self.label + '.mop'
foutput = self.label + '.out'
self.write_input(finput, self.atoms)
# directory
self.calc_dir = os.getcwd()
command = self.command
if command is None:
raise RuntimeError('MOPAC_COMMAND is not specified')
WhatOS=platform.system()
if "Linux" in WhatOS:
if ('MOPAC_DIR' in os.environ):
mdir = os.environ['MOPAC_DIR']
else:
raise RuntimeError('MOPAC_DIR is not specified')
command_exc= "LD_PRELOAD=%s/libiomp5.so %s %s" % (mdir,command,finput)
if "Darwin" in WhatOS:
command_exc= " ".join([command , finput])
# run_timeout(command_exc ,72000)# 20hours
self.run_command(command_exc)
# exitcode = os.system('%s %s' % (command, finput)+ ' > /dev/null 2>&1 ')
# if exitcode != 0:
# raise RuntimeError('MOPAC exited with error code')
self.version = self.read_version(foutput)
energy = self.read_energy(foutput)
self.energy_zero = energy
self.energy_free = energy
self.forces = self.read_forces(foutput)
def read_version(self, fname):
"""
Reads the MOPAC version string from the output file
"""
version = 'unknown'
lines = open(fname).readlines()
for line in lines:
if " Version" in line:
version = line.split()[-2]
break
return version
def read_energy(self, fname):
"""
Reads the ENERGY from the output file (HEAT of FORMATION in kcal / mol)
Raises RuntimeError if no energy was found
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
energy = None
for line in lines:
if line.find('HEAT OF FORMATION') != -1:
words = line.split()
energy = float(words[5])
if line.find('H.o.F. per unit cell') != -1:
words = line.split()
energy = float(words[5])
if line.find('UNABLE TO ACHIEVE SELF-CONSISTENCE') != -1:
energy = None
if energy is None:
raise RuntimeError('MOPAC: could not find total energy')
### do not change unit for mopac
energy *= (kcal / mol)
return energy
def read_forces(self, fname):
"""
Reads the FORCES from the output file
units: kcal / mol / Angstrom (converted to ASE eV / Angstrom below)
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
nats = len(self.atoms)
forces = np.zeros((nats, 3), float)
infinite_force="*****"
if 'mozyme' in self.str_params['job_type'].lower():
for i, line in enumerate(lines):
if line.find('FINAL POINT AND DERIVATIVES') != -1:
for j in range(nats):
gline = lines[i + j + 5]
pre_force=gline[8:35]
if(infinite_force in pre_force):
forces[j] = [999999999.9999,999999999.9999,999999999.9999]
else:
forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]
else:
for i, line in enumerate(lines):
if line.find('GRADIENT\n') != -1:
for j in range(nats * 3):
gline = lines[i + j + 1]
pre_force=gline[49:62]
if(infinite_force in pre_force):
forces[int(j/3), int(j%3)] =999999999.9999
else:
forces[int(j/3), int(j%3)] = float(pre_force)
break
#do not change unit for mopac
forces *= - (kcal / mol)
return forces
def atoms_are_equal(self, atoms_new):
''' (adopted from jacapo.py)
comparison of atoms to self.atoms using tolerances to account
for float/double differences and float math.
'''
TOL = 1.0e-6 # angstroms
# check for change in cell parameters
test = len(atoms_new) == len(self.atoms)
if test is not True:
return False
# check for change in cell parameters
test = (abs(self.atoms.get_cell() - atoms_new.get_cell()) <= TOL).all()
if test is not True:
return False
old = self.atoms.arrays
new = atoms_new.arrays
# check for change in atom position
test = (abs(new['positions'] - old['positions']) <= TOL).all()
if test is not True:
return False
# passed all tests
return True
def update(self, atoms_new, **kwargs):
self.set(**kwargs)
if not self.atoms_are_equal(atoms_new):
self.atoms = atoms_new.copy()
self.run()
def run_qr(self, atoms_new, **kwargs):
for key in kwargs:
if key in self.bool_params:
self.bool_params[key] = kwargs[key]
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
self.atoms = atoms_new.copy()
self.run()
# Q|R requirements
def set_charge(self, charge):
self.int_params['charge'] = charge
def set_method(self, method):
self.str_params['functional'] = method
def set_label(self, label):
self.label = label
def set_nproc(self, nproc):
self.int_params['nproc'] = nproc
| self.bool_params[key] = kwargs[key] | conditional_block |
mopac_qr.py | """
Version 2012/08/20, Torsten Kerber
Contributors:
Torsten Kerber, Ecole normale superieure de Lyon:
Paul Fleurat-Lessard, Ecole normale superieure de Lyon
based on a script by Rosa Bulo, Ecole normale superieure de Lyon
This work is supported by Award No. UK-C0017, made by King Abdullah
University of Science and Technology (KAUST), Saudi Arabia
See accompanying license files for details.
"""
import os
import string
import numpy as np
import platform
from ase.units import kcal, mol
from ase.calculators.general import Calculator
str_keys = ['functional', 'job_type']
int_keys = ['restart', 'spin', 'charge','nproc']
bool_keys = ['OPT']
float_keys = ['RELSCF']
class Mopac(Calculator):
name = 'MOPAC'
def __init__(self,
label='ase',
**kwargs):
# define parameter fields
self.str_params = {}
self.int_params = {}
self.bool_params = {}
self.float_params = {}
# initials parameter fields
for key in str_keys:
self.str_params[key] = None
for key in int_keys:
self.int_params[key] = None
for key in bool_keys:
self.bool_params[key] = None
for key in float_keys:
self.float_params[key] = None
# set initial values
functional = 'PM7'
env = os.environ.get('MOPAC_FUNCTIONAL', None)
if env: functional = env
self.set(restart=0,
spin=0,
OPT=False,
functional=functional,
job_type=' 1SCF GRADIENTS AUX(0,PRECISION=9) ',
RELSCF= None)
# set user values
self.set(**kwargs)
# save label
self.label = label
#set atoms
self.atoms = None
# initialize the results
self.version = None
self.energy_zero = None
self.energy_free = None
self.forces = None
self.stress = None
self.calc_dir = None
# initialize the occupations
self.occupations = None
# command
self.command = self.get_command()
def set(self, **kwargs):
"""
Sets the parameters on the according keywords
Raises RuntimeError when wrong keyword is provided
"""
for key in kwargs:
if key in self.bool_params:
self.bool_params[key] = kwargs[key]
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
else:
raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)
def get_version(self):
return self.version
def initialize(self, atoms):
pass
def write_input(self, fname, atoms):
"""
Writes the files that have to be written each timestep
"""
# start the input
mopac_input = ''
#write functional and job_type
for key in 'functional', 'job_type':
if self.str_params[key] != None:
mopac_input += self.str_params[key] + ' '
if self.float_params['RELSCF'] != None:
mopac_input += 'RELSCF=' + str(self.float_params['RELSCF']) + ' '
# write charge
# charge = sum(atoms.get_initial_charges())
#if charge != 0:
# mopac_input += 'CHARGE=%i ' % (charge)
charge=self.int_params['charge']
mopac_input += 'CHARGE= ' + str(charge)+' '
if (self.int_params['nproc'] > 1):
nproc=self.int_params['nproc']
else:
nproc=1
# threads should be specified by user
mopac_input += ' THREADS=%i' %(nproc)
# add solvent
mopac_input += ' EPS=78.4'
#write spin
spin = self.int_params['spin']
if spin == 1.:
mopac_input += 'DOUBLET '
elif spin == 2.:
mopac_input += 'TRIPLET '
#input down
mopac_input += '\n'
mopac_input += 'Title: ASE job\n\n'
f = 1
# write coordinates
for iat in range(len(atoms)):
atom = atoms[iat]
xyz = atom.position
mopac_input += ' %2s' % atom.symbol
# write x, y, z
for idir in range(3):
mopac_input += ' %16.5f %i' % (xyz[idir], f)
mopac_input += '\n'
if atoms.pbc.any():
for v in atoms.get_cell():
mopac_input += 'Tv %8.3f %8.3f %8.3f\n' % (v[0], v[1], v[2])
# write input
myfile = open(fname, 'w')
myfile.write(mopac_input)
myfile.close()
self.mopac_input = mopac_input
def get_command(self):
"""Return command string if program installed, otherwise None. """
command = None
if ('MOPAC_COMMAND' in os.environ):
command = os.environ['MOPAC_COMMAND']
return command
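# Environment contract used by this calculator (paths are illustrative):
#   export MOPAC_COMMAND=/opt/mopac/MOPAC2016.exe  # required to run at all
#   export MOPAC_DIR=/opt/mopac                    # Linux only, for libiomp5.so
#   export MOPAC_FUNCTIONAL=PM6                    # optional, overrides PM7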
def set_command(self, command):
self.command = command
def run_command(self,command):
"""
execute <command> in a subprocess and check error code
"""
from subprocess import Popen, PIPE, STDOUT
if command == '':
raise RuntimeError('no command for run_command :(')
# print 'Running: ', command #debug
proc = Popen([command], shell=True, stderr=PIPE)
proc.wait()
exitcode = proc.returncode
if exitcode != 0:
# print exitcode,'label:', self.calc_dir
error='%s exited with error code %i in %s' % (
command,exitcode,self.calc_dir)
stdout,stderr = proc.communicate()
print 'shell output: ',stdout,stderr
raise RuntimeError(error)
return 0
def run(self):
"""
Writes input in label.mop
Runs MOPAC
Reads Version, Energy and Forces
"""
import subprocess, shlex
from threading import Timer
def run_timeout(cmd, timeout_sec):
proc = subprocess.Popen(shlex.split(cmd),
# proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
shell=True,
stderr=subprocess.PIPE)
kill_proc = lambda p: p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
try:
timer.start()
stdout,stderr = proc.communicate()
print stdout,stderr
finally:
timer.cancel()
# set the input file name
finput = self.label + '.mop'
foutput = self.label + '.out'
self.write_input(finput, self.atoms)
# directory
self.calc_dir = os.getcwd()
command = self.command
if command is None:
raise RuntimeError('MOPAC_COMMAND is not specified')
WhatOS=platform.system()
if "Linux" in WhatOS:
if ('MOPAC_DIR' in os.environ):
mdir = os.environ['MOPAC_DIR']
else:
raise RuntimeError('MOPAC_DIR is not specified')
command_exc= "LD_PRELOAD=%s/libiomp5.so %s %s" % (mdir,command,finput)
if "Darwin" in WhatOS:
command_exc= " ".join([command , finput])
# run_timeout(command_exc ,72000)# 20hours
self.run_command(command_exc)
# exitcode = os.system('%s %s' % (command, finput)+ ' > /dev/null 2>&1 ')
# if exitcode != 0:
# raise RuntimeError('MOPAC exited with error code')
self.version = self.read_version(foutput)
energy = self.read_energy(foutput)
self.energy_zero = energy
self.energy_free = energy
self.forces = self.read_forces(foutput)
def read_version(self, fname):
"""
Reads the MOPAC version string from the output file
"""
version = 'unknown'
lines = open(fname).readlines()
for line in lines:
if " Version" in line:
version = line.split()[-2]
break
return version
def read_energy(self, fname):
"""
Reads the ENERGY from the output file (HEAT of FORMATION in kcal / mol)
Raises RuntimeError if no energy was found
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
energy = None
for line in lines:
if line.find('HEAT OF FORMATION') != -1:
words = line.split()
energy = float(words[5])
if line.find('H.o.F. per unit cell') != -1:
words = line.split()
energy = float(words[5])
if line.find('UNABLE TO ACHIEVE SELF-CONSISTENCE') != -1:
energy = None
if energy is None:
raise RuntimeError('MOPAC: could not find total energy')
### do not change unit for mopac
energy *= (kcal / mol)
return energy
def read_forces(self, fname):
"""
Reads the FORCES from the output file
units: kcal / mol / Angstrom (converted to ASE eV / Angstrom below)
"""
outfile = open(fname)
lines = outfile.readlines()
outfile.close()
nats = len(self.atoms)
forces = np.zeros((nats, 3), float)
infinite_force="*****"
if 'mozyme' in self.str_params['job_type'].lower():
for i, line in enumerate(lines):
if line.find('FINAL POINT AND DERIVATIVES') != -1:
for j in range(nats):
gline = lines[i + j + 5]
pre_force=gline[8:35]
if(infinite_force in pre_force):
forces[j] = [999999999.9999,999999999.9999,999999999.9999]
else:
forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]
else:
for i, line in enumerate(lines):
if line.find('GRADIENT\n') != -1:
for j in range(nats * 3):
gline = lines[i + j + 1]
pre_force=gline[49:62]
if(infinite_force in pre_force):
forces[int(j/3), int(j%3)] =999999999.9999
else:
forces[int(j/3), int(j%3)] = float(pre_force)
break
#do not change unit for mopac
forces *= - (kcal / mol)
return forces
def atoms_are_equal(self, atoms_new):
''' (adopted from jacapo.py)
comparison of atoms to self.atoms using tolerances to account
for float/double differences and float math.
'''
TOL = 1.0e-6 # angstroms
# check for change in cell parameters
test = len(atoms_new) == len(self.atoms)
if test is not True:
return False
# check for change in cell parameters
test = (abs(self.atoms.get_cell() - atoms_new.get_cell()) <= TOL).all()
if test is not True:
return False
old = self.atoms.arrays
new = atoms_new.arrays
# check for change in atom position
test = (abs(new['positions'] - old['positions']) <= TOL).all()
if test is not True:
return False
# passed all tests
return True
def update(self, atoms_new, **kwargs):
self.set(**kwargs)
if not self.atoms_are_equal(atoms_new):
self.atoms = atoms_new.copy()
self.run()
def run_qr(self, atoms_new, **kwargs):
for key in kwargs:
if key in self.bool_params:
self.bool_params[key] = kwargs[key]
elif key in self.int_params:
self.int_params[key] = kwargs[key]
elif key in self.str_params:
self.str_params[key] = kwargs[key]
elif key in self.float_params:
self.float_params[key] = kwargs[key]
self.atoms = atoms_new.copy()
self.run()
# Q|R requirements
def set_charge(self, charge):
self.int_params['charge'] = charge
def set_method(self, method):
self.str_params['functional'] = method
def set_label(self, label):
self.label = label
def set_nproc(self, nproc):
| self.int_params['nproc'] = nproc | identifier_body | |
manager.go | package pipermail
import (
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/LF-Engineering/da-ds/build"
"github.com/LF-Engineering/dev-analytics-libraries/auth0"
"github.com/LF-Engineering/dev-analytics-libraries/slack"
"github.com/LF-Engineering/dev-analytics-libraries/elastic"
timeLib "github.com/LF-Engineering/dev-analytics-libraries/time"
"github.com/LF-Engineering/dev-analytics-libraries/http"
libAffiliations "github.com/LF-Engineering/dev-analytics-libraries/affiliation"
)
// TopHits result
type TopHits struct {
Hits Hits `json:"hits"`
}
// Hits result
type Hits struct {
Hits []NestedHits `json:"hits"`
}
// NestedHits is the actual hit data
type NestedHits struct {
ID string `json:"_id"`
Source HitSource `json:"_source"`
}
// HitSource is the document _source data
type HitSource struct {
ID string `json:"id"`
ChangedAt time.Time `json:"changed_at"`
}
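// These structs decode only the slice of an Elasticsearch search response the
// manager needs, e.g. (abridged, values illustrative):
//   {"hits":{"hits":[{"_id":"fetch",
//     "_source":{"id":"fetch","changed_at":"2021-01-02T15:04:05Z"}}]}}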
// Manager describes piper mail manager
type Manager struct {
Endpoint string
Slug string
GroupName string
SHConnString string
FetcherBackendVersion string
EnricherBackendVersion string
Fetch bool
Enrich bool
ESUrl string
ESUsername string
ESPassword string
ESIndex string
FromDate *time.Time
HTTPTimeout time.Duration
Project string
FetchSize int
EnrichSize int
AffBaseURL string
ESCacheURL string
ESCacheUsername string
ESCachePassword string
AuthGrantType string
AuthClientID string
AuthClientSecret string
AuthAudience string
Auth0URL string
Environment string
WebHookURL string
MaxWorkers int
NumberOfRawMessages int
esClientProvider ESClientProvider
fetcher *Fetcher
enricher *Enricher
workerPool *workerPool
}
// workerPool is a fixed-size pool of goroutines that drain queuedTaskC
type workerPool struct {
MaxWorker int
queuedTaskC chan func()
}
// result pairs a worker id with the enriched message it produced
type result struct {
id int
enrichedItem *EnrichedMessage
}
// NewManager initiates piper mail manager instance
func NewManager(endPoint, slug, shConnStr, fetcherBackendVersion, enricherBackendVersion string, fetch bool, enrich bool, eSUrl string, esUser string, esPassword string, esIndex string, fromDate *time.Time, project string, fetchSize int, enrichSize int, affBaseURL, esCacheURL, esCacheUsername, esCachePassword, authGrantType, authClientID, authClientSecret, authAudience, auth0URL, env, webHookURL string) (*Manager, error) {
mng := &Manager{
Endpoint: endPoint,
Slug: slug,
SHConnString: shConnStr,
FetcherBackendVersion: fetcherBackendVersion,
EnricherBackendVersion: enricherBackendVersion,
Fetch: fetch,
Enrich: enrich,
ESUrl: eSUrl,
ESUsername: esUser,
ESPassword: esPassword,
ESIndex: esIndex,
FromDate: fromDate,
HTTPTimeout: 60 * time.Second,
Project: project,
FetchSize: fetchSize,
EnrichSize: enrichSize, | ESCacheURL: esCacheURL,
ESCacheUsername: esCacheUsername,
ESCachePassword: esCachePassword,
AuthGrantType: authGrantType,
AuthClientID: authClientID,
AuthClientSecret: authClientSecret,
AuthAudience: authAudience,
Auth0URL: auth0URL,
Environment: env,
esClientProvider: nil,
fetcher: nil,
enricher: nil,
WebHookURL: webHookURL,
MaxWorkers: 1000,
}
fetcher, enricher, esClientProvider, err := buildServices(mng)
if err != nil {
return nil, err
}
groupName, err := getGroupName(endPoint)
if err != nil {
return nil, err
}
mng.fetcher = fetcher
mng.enricher = enricher
mng.esClientProvider = esClientProvider
mng.GroupName = groupName
mng.workerPool = &workerPool{
MaxWorker: MaxConcurrentRequests,
queuedTaskC: make(chan func()),
}
return mng, nil
}
// Sync runs piper mail fetch and enrich according to passed parameters
func (m *Manager) Sync() error {
lastActionCachePostfix := "-last-action-date-cache"
status := make(map[string]bool)
status["doneFetch"] = !m.Fetch
status["doneEnrich"] = !m.Enrich
fetchCh := m.fetch(m.fetcher, lastActionCachePostfix)
var err error
if status["doneFetch"] == false {
err = <-fetchCh
if err == nil {
status["doneFetch"] = true
}
time.Sleep(5 * time.Second)
}
if status["doneEnrich"] == false {
err = <-m.enrich(m.enricher, lastActionCachePostfix)
if err == nil {
status["doneEnrich"] = true
}
time.Sleep(5 * time.Second)
}
return err
}
func (m *Manager) fetch(fetcher *Fetcher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
go func() {
fetchID := "fetch"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": fetchID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
now := time.Now().UTC()
var lastFetch *time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastFetch = &val.Hits.Hits[0].Source.ChangedAt
}
fromDate := m.FromDate
if fromDate == nil {
fromDate = &DefaultDateTime
}
from := timeLib.GetOldestDate(fromDate, lastFetch)
data := make([]elastic.BulkData, 0)
raw, err := fetcher.FetchItem(m.Slug, m.GroupName, m.Endpoint, *from, m.FetchSize, now)
if err != nil {
ch <- err
return
}
result := len(raw)
if result != 0 {
from = &raw[len(raw)-1].ChangedAt
}
for _, message := range raw {
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s-raw", m.ESIndex), ID: message.UUID, Data: message})
}
// set mapping and create index if not exists
_, err = m.esClientProvider.CreateIndex(fmt.Sprintf("%s-raw", m.ESIndex), PipermailRawMapping)
if err != nil {
ch <- err
return
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*RawMessage)
updateChan := HitSource{ID: fetchID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: fetchID, Data: updateChan})
// Insert raw data to elasticsearch
sizeOfData := len(data)
limit := 1000
if m.EnrichSize <= 1000 {
limit = m.EnrichSize
}
lastIndex := 0
remainingItemsLength := 0
log.Println("LEN RAW DATA : ", len(data))
// rate limit items to push to es to avoid the 413 error
if len(data) > m.EnrichSize {
for lastIndex < sizeOfData {
if lastIndex == 0 && limit <= len(data) {
_, err = m.esClientProvider.BulkInsert(data[:limit])
if err != nil {
ch <- err
return
}
lastIndex = limit
continue
}
if lastIndex > 0 && limit <= len(data[lastIndex:]) && remainingItemsLength == 0 {
_, err = m.esClientProvider.BulkInsert(data[lastIndex : lastIndex+limit])
if err != nil {
ch <- err
return
}
if lastIndex+limit < len(data[lastIndex:]) {
lastIndex += limit
} else {
remainingItemsLength = len(data[lastIndex:])
}
} else {
// handle cases where remaining messages are less than the limit
_, err = m.esClientProvider.BulkInsert(data[lastIndex:])
if err != nil {
ch <- err
return
}
// invalidate loop
lastIndex = sizeOfData + 1
}
}
}
// handle data for small docs
// es bulk upload limit is 1000
if m.EnrichSize >= sizeOfData {
if sizeOfData <= 1000 {
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
}
m.NumberOfRawMessages = sizeOfData
log.Println("DONE WITH RAW ENRICHMENT")
}
ch <- nil
}()
return ch
}
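// The batching above exists to avoid Elasticsearch rejecting oversized bulk
// payloads with HTTP 413. A minimal sketch of the same idea (esClient is a
// stand-in for m.esClientProvider):
//   for start := 0; start < len(data); start += limit {
//       end := start + limit
//       if end > len(data) {
//           end = len(data)
//       }
//       if _, err := esClient.BulkInsert(data[start:end]); err != nil {
//           return err
//       }
//   }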
func (m *Manager) enrich(enricher *Enricher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
resultC := make(chan result, 0)
jobs := make(chan *RawMessage, m.NumberOfRawMessages)
go func() {
enrichID := "enrich"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": enrichID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
query = map[string]interface{}{
"size": 10000,
"query": map[string]interface{}{
"bool": map[string]interface{}{
"must": []map[string]interface{}{},
},
},
"sort": []map[string]interface{}{
{
"metadata__updated_on": map[string]string{
"order": "desc",
},
},
},
}
var lastEnrich time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastEnrich = val.Hits.Hits[0].Source.ChangedAt
}
from := timeLib.GetOldestDate(m.FromDate, &lastEnrich)
log.Println("From: ", from)
conditions := map[string]interface{}{
"range": map[string]interface{}{
"metadata__updated_on": map[string]interface{}{
"gte": (from).Format(time.RFC3339),
},
},
}
query["query"].(map[string]interface{})["bool"].(map[string]interface{})["must"] = conditions
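// Serialized, the request body sent to the raw index looks roughly like this
// (date is illustrative; size and from are filled in just below):
//   {"size":1000,"from":0,
//    "query":{"bool":{"must":{"range":{"metadata__updated_on":{"gte":"2021-01-01T00:00:00Z"}}}}},
//    "sort":[{"metadata__updated_on":{"order":"desc"}}]}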
results := m.EnrichSize
offset := 0
query["size"] = m.EnrichSize
for results == m.EnrichSize {
// make pagination to get the specified size of documents with offset
query["from"] = offset
bites, err := m.fetcher.ElasticSearchProvider.Search(fmt.Sprintf("%s-raw", m.ESIndex), query)
if err != nil {
ch <- err
return
}
var topHits *RawHits
err = json.Unmarshal(bites, &topHits)
if err != nil {
ch <- err
return
}
data := make([]elastic.BulkData, 0)
for w := 1; w <= m.MaxWorkers; w++ {
go m.enrichWorker(w, jobs, resultC)
}
for _, hit := range topHits.Hits.Hits {
nHitSource := hit.Source
if lastEnrich.Before(hit.Source.ChangedAt) {
jobs <- &nHitSource
}
}
close(jobs)
for a := 1; a <= len(topHits.Hits.Hits); a++ {
res := <-resultC
log.Printf("[main] task %d has been finished with result message id %+v", res.id, res.enrichedItem.MessageID)
data = append(data, elastic.BulkData{IndexName: m.ESIndex, ID: res.enrichedItem.UUID, Data: res.enrichedItem})
}
log.Println("LEN ENRICH DATA : ", len(data))
results = len(data)
// setting mapping and create index if not exists
if offset == 0 {
_, err := m.esClientProvider.CreateIndex(m.ESIndex, PiperRichMapping)
if err != nil {
ch <- err
return
}
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*EnrichedMessage)
updateChan := HitSource{ID: enrichID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: enrichID, Data: updateChan})
// Insert enriched data to elasticsearch
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
results = len(data)
offset += results
}
log.Println("DONE WITH RICH ENRICHMENT")
ch <- nil
}()
return ch
}
// enrichWorker spins up workers to enrich messages
func (m *Manager) enrichWorker(workerID int, jobs <-chan *RawMessage, results chan<- result) {
for j := range jobs {
log.Printf("worker %+v started job %+v", workerID, j.UUID)
enrichedItem, err := m.enricher.EnrichMessage(j, time.Now().UTC())
// quit app if error isn't nil
if err != nil {
os.Exit(1)
}
time.Sleep(time.Second)
log.Printf("worker %+v finished job %+v", workerID, j.UUID)
results <- result{id: workerID, enrichedItem: enrichedItem}
}
}
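// Each worker drains the shared jobs channel, so overall enrich throughput is
// bounded by MaxWorkers and the one-second pause taken after every message.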
// AddTask adds task to worker pool
func (m *Manager) AddTask(task func()) {
m.workerPool.queuedTaskC <- task
}
// run starts the tasks in the worker pool queue
func (m *Manager) run() {
for i := 0; i < m.workerPool.MaxWorker; i++ {
wID := i + 1
//log.Printf("[workerPool] worker %d spawned", wID)
go func(workerID int) {
for task := range m.workerPool.queuedTaskC {
log.Printf("[workerPool] worker %d is processing task", wID)
task()
log.Printf("[workerPool] worker %d has finished processing task", wID)
}
}(wID)
}
}
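// Usage sketch (hypothetical caller): run() must be started before AddTask,
// because queuedTaskC is unbuffered and a send would otherwise block forever:
//   m.run()
//   m.AddTask(func() { log.Println("processing one archive") })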
func buildServices(m *Manager) (*Fetcher, *Enricher, ESClientProvider, error) {
httpClientProvider := http.NewClientProvider(m.HTTPTimeout)
params := &Params{
BackendVersion: m.FetcherBackendVersion,
}
esClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESUrl,
Username: m.ESUsername,
Password: m.ESPassword,
})
if err != nil {
return nil, nil, nil, err
}
esCacheClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESCacheURL,
Username: m.ESCacheUsername,
Password: m.ESCachePassword,
})
if err != nil {
return nil, nil, nil, err
}
// Initialize fetcher object to get data from piper mail archive link
fetcher := NewFetcher(params, httpClientProvider, esClientProvider)
slackProvider := slack.New(m.WebHookURL)
appNameVersion := fmt.Sprintf("%s-%v", build.AppName, strconv.FormatInt(time.Now().Unix(), 10))
auth0Client, err := auth0.NewAuth0Client(
m.Environment,
m.AuthGrantType,
m.AuthClientID,
m.AuthClientSecret,
m.AuthAudience,
m.Auth0URL,
httpClientProvider,
esCacheClientProvider,
&slackProvider,
appNameVersion)
if err != nil {
return nil, nil, nil, err
}
affiliationsClientProvider, err := libAffiliations.NewAffiliationsClient(m.AffBaseURL, m.Slug, httpClientProvider, esCacheClientProvider, auth0Client, &slackProvider)
if err != nil {
return nil, nil, nil, err
}
//Initialize enrich object to enrich raw data
enricher := NewEnricher(m.EnricherBackendVersion, esClientProvider, affiliationsClientProvider)
return fetcher, enricher, esClientProvider, err
}
// getGroupName extracts a pipermail group name from the given mailing list url
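// Example (illustrative URL): "https://lists.example.org/pipermail/dev-list/"
// parses to path "pipermail/dev-list" and yields the group name
// "pipermail-dev-list".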
func getGroupName(targetURL string) (string, error) {
u, err := url.Parse(targetURL)
if err != nil {
return "", err
}
path := u.Path
if strings.HasPrefix(path, "/") {
path = strings.TrimPrefix(path, "/")
}
if strings.HasSuffix(path, "/") {
path = strings.TrimSuffix(path, "/")
}
path = strings.ReplaceAll(path, "/", "-")
return path, nil
} | AffBaseURL: affBaseURL, | random_line_split |
manager.go | package pipermail
import (
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/LF-Engineering/da-ds/build"
"github.com/LF-Engineering/dev-analytics-libraries/auth0"
"github.com/LF-Engineering/dev-analytics-libraries/slack"
"github.com/LF-Engineering/dev-analytics-libraries/elastic"
timeLib "github.com/LF-Engineering/dev-analytics-libraries/time"
"github.com/LF-Engineering/dev-analytics-libraries/http"
libAffiliations "github.com/LF-Engineering/dev-analytics-libraries/affiliation"
)
// TopHits result
type TopHits struct {
Hits Hits `json:"hits"`
}
// Hits result
type Hits struct {
Hits []NestedHits `json:"hits"`
}
// NestedHits is the actual hit data
type NestedHits struct {
ID string `json:"_id"`
Source HitSource `json:"_source"`
}
// HitSource is the document _source data
type HitSource struct {
ID string `json:"id"`
ChangedAt time.Time `json:"changed_at"`
}
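// HitSource doubles as the cache document stored in the
// "-last-action-date-cache" index: ID is "fetch" or "enrich" and ChangedAt is
// the last processed timestamp, read back on the next run to resume
// incrementally.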
// Manager describes piper mail manager
type Manager struct {
Endpoint string
Slug string
GroupName string
SHConnString string
FetcherBackendVersion string
EnricherBackendVersion string
Fetch bool
Enrich bool
ESUrl string
ESUsername string
ESPassword string
ESIndex string
FromDate *time.Time
HTTPTimeout time.Duration
Project string
FetchSize int
EnrichSize int
AffBaseURL string
ESCacheURL string
ESCacheUsername string
ESCachePassword string
AuthGrantType string
AuthClientID string
AuthClientSecret string
AuthAudience string
Auth0URL string
Environment string
WebHookURL string
MaxWorkers int
NumberOfRawMessages int
esClientProvider ESClientProvider
fetcher *Fetcher
enricher *Enricher
workerPool *workerPool
}
// workerPool is a fixed-size pool of goroutines that drain queuedTaskC
type workerPool struct {
MaxWorker int
queuedTaskC chan func()
}
// result pairs a worker id with the enriched message it produced
type result struct {
id int
enrichedItem *EnrichedMessage
}
// NewManager initiates piper mail manager instance
func NewManager(endPoint, slug, shConnStr, fetcherBackendVersion, enricherBackendVersion string, fetch bool, enrich bool, eSUrl string, esUser string, esPassword string, esIndex string, fromDate *time.Time, project string, fetchSize int, enrichSize int, affBaseURL, esCacheURL, esCacheUsername, esCachePassword, authGrantType, authClientID, authClientSecret, authAudience, auth0URL, env, webHookURL string) (*Manager, error) {
mng := &Manager{
Endpoint: endPoint,
Slug: slug,
SHConnString: shConnStr,
FetcherBackendVersion: fetcherBackendVersion,
EnricherBackendVersion: enricherBackendVersion,
Fetch: fetch,
Enrich: enrich,
ESUrl: eSUrl,
ESUsername: esUser,
ESPassword: esPassword,
ESIndex: esIndex,
FromDate: fromDate,
HTTPTimeout: 60 * time.Second,
Project: project,
FetchSize: fetchSize,
EnrichSize: enrichSize,
AffBaseURL: affBaseURL,
ESCacheURL: esCacheURL,
ESCacheUsername: esCacheUsername,
ESCachePassword: esCachePassword,
AuthGrantType: authGrantType,
AuthClientID: authClientID,
AuthClientSecret: authClientSecret,
AuthAudience: authAudience,
Auth0URL: auth0URL,
Environment: env,
esClientProvider: nil,
fetcher: nil,
enricher: nil,
WebHookURL: webHookURL,
MaxWorkers: 1000,
}
fetcher, enricher, esClientProvider, err := buildServices(mng)
if err != nil {
return nil, err
}
groupName, err := getGroupName(endPoint)
if err != nil {
return nil, err
}
mng.fetcher = fetcher
mng.enricher = enricher
mng.esClientProvider = esClientProvider
mng.GroupName = groupName
mng.workerPool = &workerPool{
MaxWorker: MaxConcurrentRequests,
queuedTaskC: make(chan func()),
}
return mng, nil
}
// Sync runs piper mail fetch and enrich according to passed parameters
func (m *Manager) Sync() error {
lastActionCachePostfix := "-last-action-date-cache"
var err error
if m.Fetch {
// start the fetch goroutine only when fetching is requested, so no
// goroutine is leaked blocking on the unbuffered result channel
err = <-m.fetch(m.fetcher, lastActionCachePostfix)
time.Sleep(5 * time.Second)
}
if m.Enrich {
err = <-m.enrich(m.enricher, lastActionCachePostfix)
time.Sleep(5 * time.Second)
}
return err
}
func (m *Manager) fetch(fetcher *Fetcher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
go func() {
fetchID := "fetch"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": fetchID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
now := time.Now().UTC()
var lastFetch *time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastFetch = &val.Hits.Hits[0].Source.ChangedAt
}
fromDate := m.FromDate
if fromDate == nil {
fromDate = &DefaultDateTime
}
from := timeLib.GetOldestDate(fromDate, lastFetch)
data := make([]elastic.BulkData, 0)
raw, err := fetcher.FetchItem(m.Slug, m.GroupName, m.Endpoint, *from, m.FetchSize, now)
if err != nil {
ch <- err
return
}
result := len(raw)
if result != 0 {
from = &raw[len(raw)-1].ChangedAt
}
for _, message := range raw {
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s-raw", m.ESIndex), ID: message.UUID, Data: message})
}
// set mapping and create index if not exists
_, err = m.esClientProvider.CreateIndex(fmt.Sprintf("%s-raw", m.ESIndex), PipermailRawMapping)
if err != nil {
ch <- err
return
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*RawMessage)
updateChan := HitSource{ID: fetchID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: fetchID, Data: updateChan})
// Insert raw data to elasticsearch
sizeOfData := len(data)
limit := 1000
if m.EnrichSize <= 1000 {
limit = m.EnrichSize
}
lastIndex := 0
remainingItemsLength := 0
log.Println("LEN RAW DATA : ", len(data))
// rate limit items to push to es to avoid the 413 error
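// (Elasticsearch rejects bulk bodies larger than its http.max_content_length
// setting with HTTP 413 "Request Entity Too Large", hence the chunks of at
// most `limit` documents below)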
if len(data) > m.EnrichSize {
for lastIndex < sizeOfData {
if lastIndex == 0 && limit <= len(data) {
_, err = m.esClientProvider.BulkInsert(data[:limit])
if err != nil {
ch <- err
return
}
lastIndex = limit
continue
}
if lastIndex > 0 && limit <= len(data[lastIndex:]) && remainingItemsLength == 0 {
_, err = m.esClientProvider.BulkInsert(data[lastIndex : lastIndex+limit])
if err != nil {
ch <- err
return
}
if lastIndex+limit < len(data[lastIndex:]) {
lastIndex += limit
} else {
remainingItemsLength = len(data[lastIndex:])
}
} else {
// handle cases where remaining messages are less than the limit
_, err = m.esClientProvider.BulkInsert(data[lastIndex:])
if err != nil {
ch <- err
return
}
// invalidate loop
lastIndex = sizeOfData + 1
}
}
}
// handle data for small docs
// es bulk upload limit is 1000
if m.EnrichSize >= sizeOfData {
if sizeOfData <= 1000 {
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
}
m.NumberOfRawMessages = sizeOfData
log.Println("DONE WITH RAW ENRICHMENT")
}
ch <- nil
}()
return ch
}
func (m *Manager) enrich(enricher *Enricher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
resultC := make(chan result)
go func() {
enrichID := "enrich"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": enrichID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
query = map[string]interface{}{
"size": 10000,
"query": map[string]interface{}{
"bool": map[string]interface{}{
"must": []map[string]interface{}{},
},
},
"sort": []map[string]interface{}{
{
"metadata__updated_on": map[string]string{
"order": "desc",
},
},
},
}
var lastEnrich time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastEnrich = val.Hits.Hits[0].Source.ChangedAt
}
from := timeLib.GetOldestDate(m.FromDate, &lastEnrich)
log.Println("From: ", from)
conditions := map[string]interface{}{
"range": map[string]interface{}{
"metadata__updated_on": map[string]interface{}{
"gte": (from).Format(time.RFC3339),
},
},
}
query["query"].(map[string]interface{})["bool"].(map[string]interface{})["must"] = conditions
results := m.EnrichSize
offset := 0
query["size"] = m.EnrichSize
for results == m.EnrichSize {
// make pagination to get the specified size of documents with offset
query["from"] = offset
bites, err := m.fetcher.ElasticSearchProvider.Search(fmt.Sprintf("%s-raw", m.ESIndex), query)
if err != nil {
ch <- err
return
}
var topHits *RawHits
err = json.Unmarshal(bites, &topHits)
if err != nil {
ch <- err
return
}
data := make([]elastic.BulkData, 0)
// a fresh jobs channel is created per page: it is closed once the page is
// submitted, and a closed channel cannot be reused by the next iteration
jobs := make(chan *RawMessage, len(topHits.Hits.Hits))
for w := 1; w <= m.MaxWorkers; w++ {
go m.enrichWorker(w, jobs, resultC)
}
// count submitted jobs so the collection loop below does not block
// waiting for messages that were filtered out
submitted := 0
for _, hit := range topHits.Hits.Hits {
nHitSource := hit.Source
if lastEnrich.Before(hit.Source.ChangedAt) {
jobs <- &nHitSource
submitted++
}
}
close(jobs)
for a := 1; a <= submitted; a++ {
res := <-resultC
log.Printf("[main] task %d has been finished with result message id %+v", res.id, res.enrichedItem.MessageID)
data = append(data, elastic.BulkData{IndexName: m.ESIndex, ID: res.enrichedItem.UUID, Data: res.enrichedItem})
}
log.Println("LEN ENRICH DATA : ", len(data))
results = len(data)
// set mapping and create index if not exists
if offset == 0 {
_, err := m.esClientProvider.CreateIndex(m.ESIndex, PiperRichMapping)
if err != nil {
ch <- err
return
}
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*EnrichedMessage)
updateChan := HitSource{ID: enrichID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: enrichID, Data: updateChan})
// Insert enriched data to elasticsearch
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
// results was computed above before the cache-update doc was appended,
// so pagination advances by the number of enriched documents only
offset += results
}
log.Println("DONE WITH RICH ENRICHMENT")
ch <- nil
}()
return ch
}
// enrichWorker spins up workers to enrich messages
func (m *Manager) enrichWorker(workerID int, jobs <-chan *RawMessage, results chan<- result) {
for j := range jobs {
log.Printf("worker %+v started job %+v", workerID, j.UUID)
enrichedItem, err := m.enricher.EnrichMessage(j, time.Now().UTC())
// log the failure and quit the app if enrichment fails
if err != nil {
log.Printf("worker %d failed to enrich message %v: %v", workerID, j.UUID, err)
os.Exit(1)
}
time.Sleep(time.Second)
log.Printf("worker %+v finished job %+v", workerID, j.UUID)
results <- result{id: workerID, enrichedItem: enrichedItem}
}
}
// AddTask adds task to worker pool
func (m *Manager) AddTask(task func()) {
m.workerPool.queuedTaskC <- task
}
// run starts the tasks in the worker pool queue
func (m *Manager) run() {
for i := 0; i < m.workerPool.MaxWorker; i++ {
wID := i + 1
//log.Printf("[workerPool] worker %d spawned", wID)
go func(workerID int) {
for task := range m.workerPool.queuedTaskC {
log.Printf("[workerPool] worker %d is processing task", wID)
task()
log.Printf("[workerPool] worker %d has finished processing task", wID)
}
}(wID)
}
}
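// Illustrative usage sketch (not part of the original flow): run must be
// invoked before AddTask, because queuedTaskC is unbuffered and AddTask
// blocks until one of the pool workers receives the task.
//
//  m.run()
//  m.AddTask(func() { log.Println("processing one archive page") })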
func buildServices(m *Manager) (*Fetcher, *Enricher, ESClientProvider, error) {
httpClientProvider := http.NewClientProvider(m.HTTPTimeout)
params := &Params{
BackendVersion: m.FetcherBackendVersion,
}
esClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESUrl,
Username: m.ESUsername,
Password: m.ESPassword,
})
if err != nil {
return nil, nil, nil, err
}
esCacheClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESCacheURL,
Username: m.ESCacheUsername,
Password: m.ESCachePassword,
})
if err != nil {
return nil, nil, nil, err
}
// Initialize fetcher object to get data from piper mail archive link
fetcher := NewFetcher(params, httpClientProvider, esClientProvider)
slackProvider := slack.New(m.WebHookURL)
appNameVersion := fmt.Sprintf("%s-%v", build.AppName, strconv.FormatInt(time.Now().Unix(), 10))
auth0Client, err := auth0.NewAuth0Client(
m.Environment,
m.AuthGrantType,
m.AuthClientID,
m.AuthClientSecret,
m.AuthAudience,
m.Auth0URL,
httpClientProvider,
esCacheClientProvider,
&slackProvider,
appNameVersion)
if err != nil {
return nil, nil, nil, err
}
affiliationsClientProvider, err := libAffiliations.NewAffiliationsClient(m.AffBaseURL, m.Slug, httpClientProvider, esCacheClientProvider, auth0Client, &slackProvider)
if err != nil {
return nil, nil, nil, err
}
//Initialize enrich object to enrich raw data
enricher := NewEnricher(m.EnricherBackendVersion, esClientProvider, affiliationsClientProvider)
return fetcher, enricher, esClientProvider, err
}
// getGroupName extracts a pipermail group name from the given mailing list url
func getGroupName(targetURL string) (string, error) | {
u, err := url.Parse(targetURL)
if err != nil {
return "", err
}
// trim any leading and trailing slashes in a single pass
path := strings.Trim(u.Path, "/")
path = strings.ReplaceAll(path, "/", "-")
return path, nil
} | identifier_body | |
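// Example (hypothetical URL, for illustration only): for an archive link
// such as "https://lists.example.org/pipermail/dev-list/", u.Path is
// "/pipermail/dev-list/"; getGroupName strips the surrounding slashes and
// joins the remaining segments to return "pipermail-dev-list".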
manager.go | package pipermail
import (
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/LF-Engineering/da-ds/build"
"github.com/LF-Engineering/dev-analytics-libraries/auth0"
"github.com/LF-Engineering/dev-analytics-libraries/slack"
"github.com/LF-Engineering/dev-analytics-libraries/elastic"
timeLib "github.com/LF-Engineering/dev-analytics-libraries/time"
"github.com/LF-Engineering/dev-analytics-libraries/http"
libAffiliations "github.com/LF-Engineering/dev-analytics-libraries/affiliation"
)
// TopHits result
type TopHits struct {
Hits Hits `json:"hits"`
}
// Hits result
type Hits struct {
Hits []NestedHits `json:"hits"`
}
// NestedHits is the actual hit data
type NestedHits struct {
ID string `json:"_id"`
Source HitSource `json:"_source"`
}
// HitSource is the document _source data
type HitSource struct {
ID string `json:"id"`
ChangedAt time.Time `json:"changed_at"`
}
// Manager describes piper mail manager
type Manager struct {
Endpoint string
Slug string
GroupName string
SHConnString string
FetcherBackendVersion string
EnricherBackendVersion string
Fetch bool
Enrich bool
ESUrl string
ESUsername string
ESPassword string
ESIndex string
FromDate *time.Time
HTTPTimeout time.Duration
Project string
FetchSize int
EnrichSize int
AffBaseURL string
ESCacheURL string
ESCacheUsername string
ESCachePassword string
AuthGrantType string
AuthClientID string
AuthClientSecret string
AuthAudience string
Auth0URL string
Environment string
WebHookURL string
MaxWorkers int
NumberOfRawMessages int
esClientProvider ESClientProvider
fetcher *Fetcher
enricher *Enricher
workerPool *workerPool
}
// workerPool manages a fixed-size pool of workers consuming queued tasks
type workerPool struct {
MaxWorker int
queuedTaskC chan func()
}
// result worker pool result struct
type result struct {
id int
enrichedItem *EnrichedMessage
}
// NewManager initializes a pipermail manager instance
func NewManager(endPoint, slug, shConnStr, fetcherBackendVersion, enricherBackendVersion string, fetch bool, enrich bool, eSUrl string, esUser string, esPassword string, esIndex string, fromDate *time.Time, project string, fetchSize int, enrichSize int, affBaseURL, esCacheURL, esCacheUsername, esCachePassword, authGrantType, authClientID, authClientSecret, authAudience, auth0URL, env, webHookURL string) (*Manager, error) {
mng := &Manager{
Endpoint: endPoint,
Slug: slug,
SHConnString: shConnStr,
FetcherBackendVersion: fetcherBackendVersion,
EnricherBackendVersion: enricherBackendVersion,
Fetch: fetch,
Enrich: enrich,
ESUrl: eSUrl,
ESUsername: esUser,
ESPassword: esPassword,
ESIndex: esIndex,
FromDate: fromDate,
HTTPTimeout: 60 * time.Second,
Project: project,
FetchSize: fetchSize,
EnrichSize: enrichSize,
AffBaseURL: affBaseURL,
ESCacheURL: esCacheURL,
ESCacheUsername: esCacheUsername,
ESCachePassword: esCachePassword,
AuthGrantType: authGrantType,
AuthClientID: authClientID,
AuthClientSecret: authClientSecret,
AuthAudience: authAudience,
Auth0URL: auth0URL,
Environment: env,
esClientProvider: nil,
fetcher: nil,
enricher: nil,
WebHookURL: webHookURL,
MaxWorkers: 1000,
}
fetcher, enricher, esClientProvider, err := buildServices(mng)
if err != nil {
return nil, err
}
groupName, err := getGroupName(endPoint)
if err != nil {
return nil, err
}
mng.fetcher = fetcher
mng.enricher = enricher
mng.esClientProvider = esClientProvider
mng.GroupName = groupName
mng.workerPool = &workerPool{
MaxWorker: MaxConcurrentRequests,
queuedTaskC: make(chan func()),
}
return mng, nil
}
// Sync runs piper mail fetch and enrich according to passed parameters
func (m *Manager) | () error {
lastActionCachePostfix := "-last-action-date-cache"
var err error
if m.Fetch {
// start the fetch goroutine only when fetching is requested, so no
// goroutine is leaked blocking on the unbuffered result channel
err = <-m.fetch(m.fetcher, lastActionCachePostfix)
time.Sleep(5 * time.Second)
}
if m.Enrich {
err = <-m.enrich(m.enricher, lastActionCachePostfix)
time.Sleep(5 * time.Second)
}
return err
}
func (m *Manager) fetch(fetcher *Fetcher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
go func() {
fetchID := "fetch"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": fetchID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
now := time.Now().UTC()
var lastFetch *time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastFetch = &val.Hits.Hits[0].Source.ChangedAt
}
fromDate := m.FromDate
if fromDate == nil {
fromDate = &DefaultDateTime
}
from := timeLib.GetOldestDate(fromDate, lastFetch)
data := make([]elastic.BulkData, 0)
raw, err := fetcher.FetchItem(m.Slug, m.GroupName, m.Endpoint, *from, m.FetchSize, now)
if err != nil {
ch <- err
return
}
result := len(raw)
if result != 0 {
from = &raw[len(raw)-1].ChangedAt
}
for _, message := range raw {
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s-raw", m.ESIndex), ID: message.UUID, Data: message})
}
// set mapping and create index if not exists
_, err = m.esClientProvider.CreateIndex(fmt.Sprintf("%s-raw", m.ESIndex), PipermailRawMapping)
if err != nil {
ch <- err
return
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*RawMessage)
updateChan := HitSource{ID: fetchID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: fetchID, Data: updateChan})
// Insert raw data to elasticsearch
sizeOfData := len(data)
limit := 1000
if m.EnrichSize <= 1000 {
limit = m.EnrichSize
}
lastIndex := 0
remainingItemsLength := 0
log.Println("LEN RAW DATA : ", len(data))
// rate limit items to push to es to avoid the 413 error
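// (Elasticsearch rejects bulk bodies larger than its http.max_content_length
// setting with HTTP 413 "Request Entity Too Large", hence the chunks of at
// most `limit` documents below)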
if len(data) > m.EnrichSize {
for lastIndex < sizeOfData {
if lastIndex == 0 && limit <= len(data) {
_, err = m.esClientProvider.BulkInsert(data[:limit])
if err != nil {
ch <- err
return
}
lastIndex = limit
continue
}
if lastIndex > 0 && limit <= len(data[lastIndex:]) && remainingItemsLength == 0 {
_, err = m.esClientProvider.BulkInsert(data[lastIndex : lastIndex+limit])
if err != nil {
ch <- err
return
}
if lastIndex+limit < len(data[lastIndex:]) {
lastIndex += limit
} else {
remainingItemsLength = len(data[lastIndex:])
}
} else {
// handle cases where remaining messages are less than the limit
_, err = m.esClientProvider.BulkInsert(data[lastIndex:])
if err != nil {
ch <- err
return
}
// invalidate loop
lastIndex = sizeOfData + 1
}
}
}
// handle data for small docs
// es bulk upload limit is 1000
if m.EnrichSize >= sizeOfData {
if sizeOfData <= 1000 {
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
}
m.NumberOfRawMessages = sizeOfData
log.Println("DONE WITH RAW ENRICHMENT")
}
ch <- nil
}()
return ch
}
func (m *Manager) enrich(enricher *Enricher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
resultC := make(chan result)
go func() {
enrichID := "enrich"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": enrichID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
query = map[string]interface{}{
"size": 10000,
"query": map[string]interface{}{
"bool": map[string]interface{}{
"must": []map[string]interface{}{},
},
},
"sort": []map[string]interface{}{
{
"metadata__updated_on": map[string]string{
"order": "desc",
},
},
},
}
var lastEnrich time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastEnrich = val.Hits.Hits[0].Source.ChangedAt
}
from := timeLib.GetOldestDate(m.FromDate, &lastEnrich)
log.Println("From: ", from)
conditions := map[string]interface{}{
"range": map[string]interface{}{
"metadata__updated_on": map[string]interface{}{
"gte": (from).Format(time.RFC3339),
},
},
}
query["query"].(map[string]interface{})["bool"].(map[string]interface{})["must"] = conditions
results := m.EnrichSize
offset := 0
query["size"] = m.EnrichSize
for results == m.EnrichSize {
// make pagination to get the specified size of documents with offset
query["from"] = offset
bites, err := m.fetcher.ElasticSearchProvider.Search(fmt.Sprintf("%s-raw", m.ESIndex), query)
if err != nil {
ch <- err
return
}
var topHits *RawHits
err = json.Unmarshal(bites, &topHits)
if err != nil {
ch <- err
return
}
data := make([]elastic.BulkData, 0)
// a fresh jobs channel is created per page: it is closed once the page is
// submitted, and a closed channel cannot be reused by the next iteration
jobs := make(chan *RawMessage, len(topHits.Hits.Hits))
for w := 1; w <= m.MaxWorkers; w++ {
go m.enrichWorker(w, jobs, resultC)
}
// count submitted jobs so the collection loop below does not block
// waiting for messages that were filtered out
submitted := 0
for _, hit := range topHits.Hits.Hits {
nHitSource := hit.Source
if lastEnrich.Before(hit.Source.ChangedAt) {
jobs <- &nHitSource
submitted++
}
}
close(jobs)
for a := 1; a <= submitted; a++ {
res := <-resultC
log.Printf("[main] task %d has been finished with result message id %+v", res.id, res.enrichedItem.MessageID)
data = append(data, elastic.BulkData{IndexName: m.ESIndex, ID: res.enrichedItem.UUID, Data: res.enrichedItem})
}
log.Println("LEN ENRICH DATA : ", len(data))
results = len(data)
// set mapping and create index if not exists
if offset == 0 {
_, err := m.esClientProvider.CreateIndex(m.ESIndex, PiperRichMapping)
if err != nil {
ch <- err
return
}
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*EnrichedMessage)
updateChan := HitSource{ID: enrichID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: enrichID, Data: updateChan})
// Insert enriched data to elasticsearch
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
// results was computed above before the cache-update doc was appended,
// so pagination advances by the number of enriched documents only
offset += results
}
log.Println("DONE WITH RICH ENRICHMENT")
ch <- nil
}()
return ch
}
// enrichWorker spins up workers to enrich messages
func (m *Manager) enrichWorker(workerID int, jobs <-chan *RawMessage, results chan<- result) {
for j := range jobs {
log.Printf("worker %+v started job %+v", workerID, j.UUID)
enrichedItem, err := m.enricher.EnrichMessage(j, time.Now().UTC())
// log the failure and quit the app if enrichment fails
if err != nil {
log.Printf("worker %d failed to enrich message %v: %v", workerID, j.UUID, err)
os.Exit(1)
}
time.Sleep(time.Second)
log.Printf("worker %+v finished job %+v", workerID, j.UUID)
results <- result{id: workerID, enrichedItem: enrichedItem}
}
}
// AddTask adds task to worker pool
func (m *Manager) AddTask(task func()) {
m.workerPool.queuedTaskC <- task
}
// run starts the tasks in the worker pool queue
func (m *Manager) run() {
for i := 0; i < m.workerPool.MaxWorker; i++ {
wID := i + 1
//log.Printf("[workerPool] worker %d spawned", wID)
go func(workerID int) {
for task := range m.workerPool.queuedTaskC {
log.Printf("[workerPool] worker %d is processing task", wID)
task()
log.Printf("[workerPool] worker %d has finished processing task", wID)
}
}(wID)
}
}
func buildServices(m *Manager) (*Fetcher, *Enricher, ESClientProvider, error) {
httpClientProvider := http.NewClientProvider(m.HTTPTimeout)
params := &Params{
BackendVersion: m.FetcherBackendVersion,
}
esClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESUrl,
Username: m.ESUsername,
Password: m.ESPassword,
})
if err != nil {
return nil, nil, nil, err
}
esCacheClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESCacheURL,
Username: m.ESCacheUsername,
Password: m.ESCachePassword,
})
if err != nil {
return nil, nil, nil, err
}
// Initialize fetcher object to get data from piper mail archive link
fetcher := NewFetcher(params, httpClientProvider, esClientProvider)
slackProvider := slack.New(m.WebHookURL)
appNameVersion := fmt.Sprintf("%s-%v", build.AppName, strconv.FormatInt(time.Now().Unix(), 10))
auth0Client, err := auth0.NewAuth0Client(
m.Environment,
m.AuthGrantType,
m.AuthClientID,
m.AuthClientSecret,
m.AuthAudience,
m.Auth0URL,
httpClientProvider,
esCacheClientProvider,
&slackProvider,
appNameVersion)
if err != nil {
return nil, nil, nil, err
}
affiliationsClientProvider, err := libAffiliations.NewAffiliationsClient(m.AffBaseURL, m.Slug, httpClientProvider, esCacheClientProvider, auth0Client, &slackProvider)
if err != nil {
return nil, nil, nil, err
}
//Initialize enrich object to enrich raw data
enricher := NewEnricher(m.EnricherBackendVersion, esClientProvider, affiliationsClientProvider)
return fetcher, enricher, esClientProvider, err
}
// getGroupName extracts a pipermail group name from the given mailing list url
func getGroupName(targetURL string) (string, error) {
u, err := url.Parse(targetURL)
if err != nil {
return "", err
}
// trim any leading and trailing slashes in a single pass
path := strings.Trim(u.Path, "/")
path = strings.ReplaceAll(path, "/", "-")
return path, nil
}
| Sync | identifier_name |
manager.go | package pipermail
import (
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/LF-Engineering/da-ds/build"
"github.com/LF-Engineering/dev-analytics-libraries/auth0"
"github.com/LF-Engineering/dev-analytics-libraries/slack"
"github.com/LF-Engineering/dev-analytics-libraries/elastic"
timeLib "github.com/LF-Engineering/dev-analytics-libraries/time"
"github.com/LF-Engineering/dev-analytics-libraries/http"
libAffiliations "github.com/LF-Engineering/dev-analytics-libraries/affiliation"
)
// TopHits result
type TopHits struct {
Hits Hits `json:"hits"`
}
// Hits result
type Hits struct {
Hits []NestedHits `json:"hits"`
}
// NestedHits is the actual hit data
type NestedHits struct {
ID string `json:"_id"`
Source HitSource `json:"_source"`
}
// HitSource is the document _source data
type HitSource struct {
ID string `json:"id"`
ChangedAt time.Time `json:"changed_at"`
}
// Manager describes piper mail manager
type Manager struct {
Endpoint string
Slug string
GroupName string
SHConnString string
FetcherBackendVersion string
EnricherBackendVersion string
Fetch bool
Enrich bool
ESUrl string
ESUsername string
ESPassword string
ESIndex string
FromDate *time.Time
HTTPTimeout time.Duration
Project string
FetchSize int
EnrichSize int
AffBaseURL string
ESCacheURL string
ESCacheUsername string
ESCachePassword string
AuthGrantType string
AuthClientID string
AuthClientSecret string
AuthAudience string
Auth0URL string
Environment string
WebHookURL string
MaxWorkers int
NumberOfRawMessages int
esClientProvider ESClientProvider
fetcher *Fetcher
enricher *Enricher
workerPool *workerPool
}
// workerPool manages a fixed-size pool of workers consuming queued tasks
type workerPool struct {
MaxWorker int
queuedTaskC chan func()
}
// result worker pool result struct
type result struct {
id int
enrichedItem *EnrichedMessage
}
// NewManager initializes a pipermail manager instance
func NewManager(endPoint, slug, shConnStr, fetcherBackendVersion, enricherBackendVersion string, fetch bool, enrich bool, eSUrl string, esUser string, esPassword string, esIndex string, fromDate *time.Time, project string, fetchSize int, enrichSize int, affBaseURL, esCacheURL, esCacheUsername, esCachePassword, authGrantType, authClientID, authClientSecret, authAudience, auth0URL, env, webHookURL string) (*Manager, error) {
mng := &Manager{
Endpoint: endPoint,
Slug: slug,
SHConnString: shConnStr,
FetcherBackendVersion: fetcherBackendVersion,
EnricherBackendVersion: enricherBackendVersion,
Fetch: fetch,
Enrich: enrich,
ESUrl: eSUrl,
ESUsername: esUser,
ESPassword: esPassword,
ESIndex: esIndex,
FromDate: fromDate,
HTTPTimeout: 60 * time.Second,
Project: project,
FetchSize: fetchSize,
EnrichSize: enrichSize,
AffBaseURL: affBaseURL,
ESCacheURL: esCacheURL,
ESCacheUsername: esCacheUsername,
ESCachePassword: esCachePassword,
AuthGrantType: authGrantType,
AuthClientID: authClientID,
AuthClientSecret: authClientSecret,
AuthAudience: authAudience,
Auth0URL: auth0URL,
Environment: env,
esClientProvider: nil,
fetcher: nil,
enricher: nil,
WebHookURL: webHookURL,
MaxWorkers: 1000,
}
fetcher, enricher, esClientProvider, err := buildServices(mng)
if err != nil {
return nil, err
}
groupName, err := getGroupName(endPoint)
if err != nil {
return nil, err
}
mng.fetcher = fetcher
mng.enricher = enricher
mng.esClientProvider = esClientProvider
mng.GroupName = groupName
mng.workerPool = &workerPool{
MaxWorker: MaxConcurrentRequests,
queuedTaskC: make(chan func()),
}
return mng, nil
}
// Sync runs piper mail fetch and enrich according to passed parameters
func (m *Manager) Sync() error {
lastActionCachePostfix := "-last-action-date-cache"
var err error
if m.Fetch {
// start the fetch goroutine only when fetching is requested, so no
// goroutine is leaked blocking on the unbuffered result channel
err = <-m.fetch(m.fetcher, lastActionCachePostfix)
time.Sleep(5 * time.Second)
}
if m.Enrich {
err = <-m.enrich(m.enricher, lastActionCachePostfix)
time.Sleep(5 * time.Second)
}
return err
}
func (m *Manager) fetch(fetcher *Fetcher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
go func() {
fetchID := "fetch"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": fetchID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
now := time.Now().UTC()
var lastFetch *time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastFetch = &val.Hits.Hits[0].Source.ChangedAt
}
fromDate := m.FromDate
if fromDate == nil {
fromDate = &DefaultDateTime
}
from := timeLib.GetOldestDate(fromDate, lastFetch)
data := make([]elastic.BulkData, 0)
raw, err := fetcher.FetchItem(m.Slug, m.GroupName, m.Endpoint, *from, m.FetchSize, now)
if err != nil {
ch <- err
return
}
result := len(raw)
if result != 0 |
for _, message := range raw {
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s-raw", m.ESIndex), ID: message.UUID, Data: message})
}
// set mapping and create index if not exists
_, err = m.esClientProvider.CreateIndex(fmt.Sprintf("%s-raw", m.ESIndex), PipermailRawMapping)
if err != nil {
ch <- err
return
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*RawMessage)
updateChan := HitSource{ID: fetchID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: fetchID, Data: updateChan})
// Insert raw data to elasticsearch
sizeOfData := len(data)
limit := 1000
if m.EnrichSize <= 1000 {
limit = m.EnrichSize
}
lastIndex := 0
remainingItemsLength := 0
log.Println("LEN RAW DATA : ", len(data))
// rate limit items to push to es to avoid the 413 error
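// (Elasticsearch rejects bulk bodies larger than its http.max_content_length
// setting with HTTP 413 "Request Entity Too Large", hence the chunks of at
// most `limit` documents below)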
if len(data) > m.EnrichSize {
for lastIndex < sizeOfData {
if lastIndex == 0 && limit <= len(data) {
_, err = m.esClientProvider.BulkInsert(data[:limit])
if err != nil {
ch <- err
return
}
lastIndex = limit
continue
}
if lastIndex > 0 && limit <= len(data[lastIndex:]) && remainingItemsLength == 0 {
_, err = m.esClientProvider.BulkInsert(data[lastIndex : lastIndex+limit])
if err != nil {
ch <- err
return
}
if lastIndex+limit < len(data[lastIndex:]) {
lastIndex += limit
} else {
remainingItemsLength = len(data[lastIndex:])
}
} else {
// handle cases where remaining messages are less than the limit
_, err = m.esClientProvider.BulkInsert(data[lastIndex:])
if err != nil {
ch <- err
return
}
// invalidate loop
lastIndex = sizeOfData + 1
}
}
}
// handle data for small docs
// es bulk upload limit is 1000
if m.EnrichSize >= sizeOfData {
if sizeOfData <= 1000 {
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
}
m.NumberOfRawMessages = sizeOfData
log.Println("DONE WITH RAW ENRICHMENT")
}
ch <- nil
}()
return ch
}
func (m *Manager) enrich(enricher *Enricher, lastActionCachePostfix string) <-chan error {
ch := make(chan error)
resultC := make(chan result)
go func() {
enrichID := "enrich"
query := map[string]interface{}{
"query": map[string]interface{}{
"term": map[string]interface{}{
"id": map[string]string{
"value": enrichID},
},
},
}
val := &TopHits{}
err := m.esClientProvider.Get(fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), query, val)
query = map[string]interface{}{
"size": 10000,
"query": map[string]interface{}{
"bool": map[string]interface{}{
"must": []map[string]interface{}{},
},
},
"sort": []map[string]interface{}{
{
"metadata__updated_on": map[string]string{
"order": "desc",
},
},
},
}
var lastEnrich time.Time
if err == nil && len(val.Hits.Hits) > 0 {
lastEnrich = val.Hits.Hits[0].Source.ChangedAt
}
from := timeLib.GetOldestDate(m.FromDate, &lastEnrich)
log.Println("From: ", from)
conditions := map[string]interface{}{
"range": map[string]interface{}{
"metadata__updated_on": map[string]interface{}{
"gte": (from).Format(time.RFC3339),
},
},
}
query["query"].(map[string]interface{})["bool"].(map[string]interface{})["must"] = conditions
results := m.EnrichSize
offset := 0
query["size"] = m.EnrichSize
for results == m.EnrichSize {
// make pagination to get the specified size of documents with offset
query["from"] = offset
bites, err := m.fetcher.ElasticSearchProvider.Search(fmt.Sprintf("%s-raw", m.ESIndex), query)
if err != nil {
ch <- err
return
}
var topHits *RawHits
err = json.Unmarshal(bites, &topHits)
if err != nil {
ch <- err
return
}
data := make([]elastic.BulkData, 0)
// a fresh jobs channel is created per page: it is closed once the page is
// submitted, and a closed channel cannot be reused by the next iteration
jobs := make(chan *RawMessage, len(topHits.Hits.Hits))
for w := 1; w <= m.MaxWorkers; w++ {
go m.enrichWorker(w, jobs, resultC)
}
// count submitted jobs so the collection loop below does not block
// waiting for messages that were filtered out
submitted := 0
for _, hit := range topHits.Hits.Hits {
nHitSource := hit.Source
if lastEnrich.Before(hit.Source.ChangedAt) {
jobs <- &nHitSource
submitted++
}
}
close(jobs)
for a := 1; a <= submitted; a++ {
res := <-resultC
log.Printf("[main] task %d has been finished with result message id %+v", res.id, res.enrichedItem.MessageID)
data = append(data, elastic.BulkData{IndexName: m.ESIndex, ID: res.enrichedItem.UUID, Data: res.enrichedItem})
}
log.Println("LEN ENRICH DATA : ", len(data))
results = len(data)
// set mapping and create index if not exists
if offset == 0 {
_, err := m.esClientProvider.CreateIndex(m.ESIndex, PiperRichMapping)
if err != nil {
ch <- err
return
}
}
if len(data) > 0 {
// Update changed at in elastic cache index
cacheDoc, _ := data[len(data)-1].Data.(*EnrichedMessage)
updateChan := HitSource{ID: enrichID, ChangedAt: cacheDoc.ChangedAt}
data = append(data, elastic.BulkData{IndexName: fmt.Sprintf("%s%s", m.ESIndex, lastActionCachePostfix), ID: enrichID, Data: updateChan})
// Insert enriched data to elasticsearch
_, err = m.esClientProvider.BulkInsert(data)
if err != nil {
ch <- err
return
}
}
// results was computed above before the cache-update doc was appended,
// so pagination advances by the number of enriched documents only
offset += results
}
log.Println("DONE WITH RICH ENRICHMENT")
ch <- nil
}()
return ch
}
// enrichWorker spins up workers to enrich messages
func (m *Manager) enrichWorker(workerID int, jobs <-chan *RawMessage, results chan<- result) {
for j := range jobs {
log.Printf("worker %+v started job %+v", workerID, j.UUID)
enrichedItem, err := m.enricher.EnrichMessage(j, time.Now().UTC())
// log the failure and quit the app if enrichment fails
if err != nil {
log.Printf("worker %d failed to enrich message %v: %v", workerID, j.UUID, err)
os.Exit(1)
}
time.Sleep(time.Second)
log.Printf("worker %+v finished job %+v", workerID, j.UUID)
results <- result{id: workerID, enrichedItem: enrichedItem}
}
}
// AddTask adds task to worker pool
func (m *Manager) AddTask(task func()) {
m.workerPool.queuedTaskC <- task
}
// run starts the tasks in the worker pool queue
func (m *Manager) run() {
for i := 0; i < m.workerPool.MaxWorker; i++ {
wID := i + 1
//log.Printf("[workerPool] worker %d spawned", wID)
go func(workerID int) {
for task := range m.workerPool.queuedTaskC {
log.Printf("[workerPool] worker %d is processing task", wID)
task()
log.Printf("[workerPool] worker %d has finished processing task", wID)
}
}(wID)
}
}
func buildServices(m *Manager) (*Fetcher, *Enricher, ESClientProvider, error) {
httpClientProvider := http.NewClientProvider(m.HTTPTimeout)
params := &Params{
BackendVersion: m.FetcherBackendVersion,
}
esClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESUrl,
Username: m.ESUsername,
Password: m.ESPassword,
})
if err != nil {
return nil, nil, nil, err
}
esCacheClientProvider, err := elastic.NewClientProvider(&elastic.Params{
URL: m.ESCacheURL,
Username: m.ESCacheUsername,
Password: m.ESCachePassword,
})
if err != nil {
return nil, nil, nil, err
}
// Initialize fetcher object to get data from piper mail archive link
fetcher := NewFetcher(params, httpClientProvider, esClientProvider)
slackProvider := slack.New(m.WebHookURL)
appNameVersion := fmt.Sprintf("%s-%v", build.AppName, strconv.FormatInt(time.Now().Unix(), 10))
auth0Client, err := auth0.NewAuth0Client(
m.Environment,
m.AuthGrantType,
m.AuthClientID,
m.AuthClientSecret,
m.AuthAudience,
m.Auth0URL,
httpClientProvider,
esCacheClientProvider,
&slackProvider,
appNameVersion)
if err != nil {
return nil, nil, nil, err
}
affiliationsClientProvider, err := libAffiliations.NewAffiliationsClient(m.AffBaseURL, m.Slug, httpClientProvider, esCacheClientProvider, auth0Client, &slackProvider)
if err != nil {
return nil, nil, nil, err
}
//Initialize enrich object to enrich raw data
enricher := NewEnricher(m.EnricherBackendVersion, esClientProvider, affiliationsClientProvider)
return fetcher, enricher, esClientProvider, err
}
// getGroupName extracts a pipermail group name from the given mailing list url
func getGroupName(targetURL string) (string, error) {
u, err := url.Parse(targetURL)
if err != nil {
return "", err
}
// trim any leading and trailing slashes in a single pass
path := strings.Trim(u.Path, "/")
path = strings.ReplaceAll(path, "/", "-")
return path, nil
}
| {
from = &raw[len(raw)-1].ChangedAt
} | conditional_block |
main.py | from env import CausalEnv
from metrics import *
from enco_model import *
from enco_training import *
from heuristics import *
from policy import MLP, GAT
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Tuple
from collections import defaultdict
import json
from datetime import datetime
from statistics import mean
import os
from utils import track
from causal_graphs.graph_generation import generate_categorical_graph, get_graph_func
from causal_graphs.graph_definition import CausalDAG
def main(args: argparse.Namespace, dag: CausalDAG=None, policy=None):
|
def train(args, env, obs_dataloader, device, policy=None):
# initialize model of the causal structure
model, adj_matrix = init_model(args, device)
# initialize optimizers
model_optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_model, betas=args.betas_model)
gamma_optimizer = AdamGamma(adj_matrix.gamma, lr=args.lr_gamma, beta1=args.betas_gamma[0], beta2=args.betas_gamma[1])
theta_optimizer = AdamTheta(adj_matrix.theta, lr=args.lr_theta, beta1=args.betas_theta[0], beta2=args.betas_theta[1])
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
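# note: observational data is re-sampled here, shadowing the obs_dataloader
# argument, so every training episode fits on a fresh draw of samples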
int_dists = choose_distribution(args, obs_data)
# initialize CE loss module
loss_module = nn.CrossEntropyLoss()
# initialize Logger
logger = Logger(args)
logger.before_training(adj_matrix, env.dag)
log_probs_lst = []
reward_lst = []
distance = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
# causal discovery training loop
for epoch in track(range(args.epochs), leave=False, desc="Epoch loop"):
# fit model to observational data (distribution fitting)
avg_loss = distribution_fitting(args,
model,
loss_module,
model_optimizer,
adj_matrix,
obs_dataloader)
# graph fitting
log_probs, reward = graph_fitting(args,
adj_matrix,
gamma_optimizer,
theta_optimizer,
model,
env,
epoch,
logger,
int_dists,
policy)
distance_new = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
reward, distance = reward / 10. + (distance - distance_new), distance_new
log_probs_lst.append(log_probs)
reward_lst.append(reward.detach().item())
# logging
stop = logger.on_epoch_end(adj_matrix, torch.from_numpy(env.dag.adj_matrix), epoch)
# stop early if SHD is 0 for 3 epochs
if stop:
break
return log_probs_lst, reward_lst
def init_model(args: argparse.Namespace, device) -> Tuple[MultivarMLP, AdjacencyMatrix]:
"""Initializes a complete model of the causal structure, consisting of a
multivariable MLP which models the conditional distributions of the causal
variables, and gamma and theta values which define the adjacency matrix
of the causal graph.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
Returns:
model: Multivariable MLP (modelling the conditional distributions).
adj_matrix: Adjacency matrix object bundling the gamma values
(determining edge probabilities) and the theta values
(determining edge directions).
"""
model = create_model(num_vars=args.num_variables,
num_categs=args.max_categories,
hidden_dims=args.hidden_dims,
actfn='leakyrelu')
if args.data_parallel:
device = torch.device("cuda:0")
print("Data parallel activated. Using %i GPUs" % torch.cuda.device_count())
model = nn.DataParallel(model)
else:
device = torch.device("cpu")
adj_matrix = AdjacencyMatrix(args.num_variables, device)
return model, adj_matrix
if __name__ == '__main__':
# collect cmd line args
parser = argparse.ArgumentParser()
# Settings
parser.add_argument('--data_parallel', default=True, type=bool, help='Use parallelization for efficiency')
parser.add_argument('--num_variables', default=25, type=int, help='Number of causal variables')
parser.add_argument('--min_categories', default=10, type=int, help='Minimum number of categories of a causal variable')
parser.add_argument('--max_categories', default=10, type=int, help='Maximum number of categories of a causal variable')
parser.add_argument('--n_obs_samples', default=100000, type=int, help='Number of observational samples from the joint distribution of a synthetic graph')
parser.add_argument('--epochs', default=30, type=int, help='Maximum number of interventions')
parser.add_argument('--graph_structure', type=str, nargs='+', default=['chain'], help='Structure of the true causal graph')
parser.add_argument('--heuristic', type=str, nargs='+', default=['uniform'], help='Heuristic used for choosing intervention nodes')
parser.add_argument('--temperature', default=10.0, type=float, help='Temperature used for sampling the intervention variable')
parser.add_argument('--full_test', default=True, type=bool, help='Full test run for comparison of all heuristics (fixed graphs)')
parser.add_argument('--edge_prob', default=0.4, type=float, help='Edge likelihood for generating a graph') # only used for "random" graph structure
parser.add_argument('--num_graphs', default=1, type=int, help='Number of graphs per structure')
parser.add_argument('--existing_dags', dest='existing_dags', action='store_true')
parser.add_argument('--generate_dags', dest='existing_dags', action='store_false')
parser.set_defaults(existing_dags=True)
# Distribution fitting (observational data)
parser.add_argument('--obs_batch_size', default=128, type=int, help='Batch size used for fitting the graph to observational data')
parser.add_argument('--obs_epochs', default=1000, type=int, help='Number of epochs for fitting the causal structure to observational data')
parser.add_argument('--hidden_dims', default=[64], type=int, nargs='+', help='Number of hidden units in each layer of the Multivariable MLP')
# Optimizers
parser.add_argument('--lr_model', default=5e-3, type=float, help='Learning rate for fitting the model to observational data')
parser.add_argument('--betas_model', default=(0.9,0.999), type=tuple, help='Betas used for Adam optimizer (model fitting)')
parser.add_argument('--lr_gamma', default=2e-2, type=float, help='Learning rate for updating gamma parameters')
parser.add_argument('--betas_gamma', default=(0.9,0.9), type=tuple, help='Betas used for Adam optimizer OR momentum used for SGD (gamma update)')
parser.add_argument('--lr_theta', default=1e-1, type=float, help='Learning rate for updating theta parameters')
parser.add_argument('--betas_theta', default=(0.9,0.999), type=tuple, help='Betas used for Adam Theta optimizer (theta update)')
# Graph fitting (interventional data)
parser.add_argument('--int_batch_size', default=128, type=int, help='Number of samples per intervention')
parser.add_argument('--int_epochs', default=100, type=int, help='Number of epochs for updating the graph gamma and theta parameters of the graph')
parser.add_argument('--int_dist', type=str, nargs='+', default=['uniform'], help='Categorical distribution used for sampling intervention values')
parser.add_argument('--lambda_sparse', default=0.004, type=float, help='Threshold for interpreting an edge as beneficial')
parser.add_argument('--K', default=100, type=int, help='Number of graph samples for gradient estimation')
parser.add_argument('--temp_int', default=[1], type=float, nargs='+', help='Temperature used for distribution of intervention values')
# Reinforcement Learning
parser.add_argument('--max_episodes', default=10000, type=int, help='Maximum number of episodes')
parser.add_argument('--learn_policy', dest='learn_policy', action='store_true')
parser.add_argument('--beta_baseline', default=0.5, type=float, help='Beta used for exponentially weighted baseline average')
parser.set_defaults(learn_policy=True)
args: argparse.Namespace = parser.parse_args()
# test runs to compare different heuristics on the same graphs
if args.full_test:
dags = defaultdict(list)
argparse_dict = vars(args)
with open(datetime.today().strftime('tb_logs/%Y-%m-%d-%H-%M-hparams.json'), 'w') as fp:
json.dump(argparse_dict, fp)
for structure in args.graph_structure:
for i in range(args.num_graphs):
dag = generate_categorical_graph(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
connected=True,
graph_func=get_graph_func(structure),
edge_prob=args.edge_prob,
use_nn=True)
if args.existing_dags:
for root, dirs, files in os.walk('dags'):
if structure not in root:
continue
if f'dag-{i}' not in root:
continue
for file in files:
if 'dag.pt' in file:
path = os.path.join(root, file)
break
else:
continue
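# for/else idiom: the else branch fires only when the inner loop found no
# dag.pt file; reaching the break below means a saved DAG was located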
break
dag = dag.load_from_file(path)
dags[structure].append(dag)
# for heuristic in args.heuristic:
for int_dist in args.int_dist:
if int_dist == 'uniform':
temp_int = [1]
else:
temp_int = args.temp_int
for temperature in temp_int:
for i, dag in enumerate(dags[structure]):
args.log_graph_structure = structure + "-dag-" + str(i) # for logging
args.log_heuristic = 'mlp-policy' # for logging
args.log_temp_int = temperature
args.log_int_dist = int_dist
main(args, dag) | """Executes a causal discovery algorithm on synthetic data from a sampled
DAG, using a specified heuristic for choosing intervention variables.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
"""
# initialize the environment: create a graph and generate observational
# samples from the joint distribution of the graph variables
env = CausalEnv(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
graph_structure=args.graph_structure,
edge_prob=args.edge_prob,
dag=dag)
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# initialize policy learning
if args.learn_policy:
policy = MLP(args.num_variables, [512, 256, 128]).float()
policy = policy.to(device)
policy_optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
rewards_lst = []
for t in range(args.max_episodes):
policy_optimizer.zero_grad()
log_probs, reward = train(args, env, obs_dataloader, device, policy)
reward += [0] * (args.epochs - len(reward))
rewards_lst.append(reward)
baseline = args.beta_baseline * torch.Tensor(reward) + (1 - args.beta_baseline) * baseline if t != 0 else torch.Tensor(reward)
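# baseline is an exponentially weighted average of episode rewards
# (b_0 = r_0, b_t = beta * r_t + (1 - beta) * b_{t-1}) used for variance
# reduction in the REINFORCE objective below.
# note: torch.tensor(log_probs, requires_grad=True) builds a fresh leaf
# tensor, so the backward pass differentiates this copy of the log-probs
# rather than flowing gradients through the policy network itself.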
policy_loss = -torch.sum((torch.Tensor(reward[:len(log_probs)]) - baseline[:len(log_probs)]) * torch.cumsum(torch.tensor(log_probs, requires_grad=True), dim=0))
policy_loss.backward()
policy_optimizer.step()
print(torch.sum(torch.Tensor(reward)))
print(torch.mean(torch.sum(torch.tensor(rewards_lst), dim=-1)))
if torch.sum(torch.Tensor(reward)) >= max(torch.sum(torch.tensor(rewards_lst), dim=-1)):
print('\nSaving policy...')
torch.save(policy.state_dict(), 'policy_mlp.pth')
else:
train(args, env, obs_dataloader, device, policy) | identifier_body |
main.py | from env import CausalEnv
from metrics import *
from enco_model import *
from enco_training import *
from heuristics import *
from policy import MLP, GAT
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Tuple
from collections import defaultdict
import json
from datetime import datetime
from statistics import mean
import os
from utils import track
from causal_graphs.graph_generation import generate_categorical_graph, get_graph_func
from causal_graphs.graph_definition import CausalDAG
def main(args: argparse.Namespace, dag: CausalDAG=None, policy=None):
"""Executes a causal discovery algorithm on synthetic data from a sampled
DAG, using a specified heuristic for choosing intervention variables.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
"""
# initialize the environment: create a graph and generate observational
# samples from the joint distribution of the graph variables
env = CausalEnv(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
graph_structure=args.graph_structure,
edge_prob=args.edge_prob,
dag=dag)
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# initialize policy learning
if args.learn_policy:
policy = MLP(args.num_variables, [512, 256, 128]).float()
policy = policy.to(device)
policy_optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
rewards_lst = []
for t in range(args.max_episodes):
policy_optimizer.zero_grad()
log_probs, reward = train(args, env, obs_dataloader, device, policy)
reward += [0] * (args.epochs - len(reward))
rewards_lst.append(reward)
baseline = args.beta_baseline * torch.Tensor(reward) + (1 - args.beta_baseline) * baseline if t != 0 else torch.Tensor(reward)
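# baseline is an exponentially weighted average of episode rewards
# (b_0 = r_0, b_t = beta * r_t + (1 - beta) * b_{t-1}) used for variance
# reduction in the REINFORCE objective below.
# note: torch.tensor(log_probs, requires_grad=True) builds a fresh leaf
# tensor, so the backward pass differentiates this copy of the log-probs
# rather than flowing gradients through the policy network itself.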
policy_loss = -torch.sum((torch.Tensor(reward[:len(log_probs)]) - baseline[:len(log_probs)]) * torch.cumsum(torch.tensor(log_probs, requires_grad=True), dim=0))
policy_loss.backward()
policy_optimizer.step()
print(torch.sum(torch.Tensor(reward)))
print(torch.mean(torch.sum(torch.tensor(rewards_lst), dim=-1)))
if torch.sum(torch.Tensor(reward)) >= max(torch.sum(torch.tensor(rewards_lst), dim=-1)):
print('\nSaving policy...')
torch.save(policy.state_dict(), 'policy_mlp.pth')
else:
train(args, env, obs_dataloader, device, policy)
def train(args, env, obs_dataloader, device, policy=None):
# initialize model of the causal structure
model, adj_matrix = init_model(args, device)
# initialize optimizers
model_optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_model, betas=args.betas_model)
gamma_optimizer = AdamGamma(adj_matrix.gamma, lr=args.lr_gamma, beta1=args.betas_gamma[0], beta2=args.betas_gamma[1])
theta_optimizer = AdamTheta(adj_matrix.theta, lr=args.lr_theta, beta1=args.betas_theta[0], beta2=args.betas_theta[1])
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
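# note: observational data is re-sampled here, shadowing the obs_dataloader
# argument, so every training episode fits on a fresh draw of samples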
int_dists = choose_distribution(args, obs_data)
# initialize CE loss module
loss_module = nn.CrossEntropyLoss()
# initialize Logger
logger = Logger(args)
logger.before_training(adj_matrix, env.dag)
log_probs_lst = []
reward_lst = []
distance = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
# causal discovery training loop
for epoch in track(range(args.epochs), leave=False, desc="Epoch loop"):
# fit model to observational data (distribution fitting)
avg_loss = distribution_fitting(args,
model,
loss_module,
model_optimizer,
adj_matrix,
obs_dataloader)
# graph fitting
log_probs, reward = graph_fitting(args,
adj_matrix,
gamma_optimizer,
theta_optimizer,
model,
env,
epoch,
logger,
int_dists,
policy)
distance_new = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
reward, distance = reward / 10. + (distance - distance_new), distance_new
log_probs_lst.append(log_probs)
reward_lst.append(reward.detach().item())
# logging
stop = logger.on_epoch_end(adj_matrix, torch.from_numpy(env.dag.adj_matrix), epoch)
# stop early if SHD is 0 for 3 epochs
if stop:
break
|
def init_model(args: argparse.Namespace, device) -> Tuple[MultivarMLP, AdjacencyMatrix]:
"""Initializes a complete model of the causal structure, consisting of a
multivariable MLP which models the conditional distributions of the causal
variables, and gamma and theta values which define the adjacency matrix
of the causal graph.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
Returns:
model: Multivariable MLP (modelling the conditional distributions).
adj_matrix: Adjacency matrix object bundling the gamma values
(determining edge probabilities) and the theta values
(determining edge directions).
"""
model = create_model(num_vars=args.num_variables,
num_categs=args.max_categories,
hidden_dims=args.hidden_dims,
actfn='leakyrelu')
if args.data_parallel:
device = torch.device("cuda:0")
print("Data parallel activated. Using %i GPUs" % torch.cuda.device_count())
model = nn.DataParallel(model)
else:
device = torch.device("cpu")
adj_matrix = AdjacencyMatrix(args.num_variables, device)
return model, adj_matrix
if __name__ == '__main__':
# collect cmd line args
parser = argparse.ArgumentParser()
# Settings
parser.add_argument('--data_parallel', default=True, type=bool, help='Use parallelization for efficiency')
parser.add_argument('--num_variables', default=25, type=int, help='Number of causal variables')
parser.add_argument('--min_categories', default=10, type=int, help='Minimum number of categories of a causal variable')
parser.add_argument('--max_categories', default=10, type=int, help='Maximum number of categories of a causal variable')
parser.add_argument('--n_obs_samples', default=100000, type=int, help='Number of observational samples from the joint distribution of a synthetic graph')
parser.add_argument('--epochs', default=30, type=int, help='Maximum number of interventions')
parser.add_argument('--graph_structure', type=str, nargs='+', default=['chain'], help='Structure of the true causal graph')
parser.add_argument('--heuristic', type=str, nargs='+', default=['uniform'], help='Heuristic used for choosing intervention nodes')
parser.add_argument('--temperature', default=10.0, type=float, help='Temperature used for sampling the intervention variable')
parser.add_argument('--full_test', default=True, type=bool, help='Full test run for comparison of all heuristics (fixed graphs)')
parser.add_argument('--edge_prob', default=0.4, type=float, help='Edge likelihood for generating a graph') # only used for "random" graph structure
parser.add_argument('--num_graphs', default=1, type=int, help='Number of graphs per structure')
parser.add_argument('--existing_dags', dest='existing_dags', action='store_true')
parser.add_argument('--generate_dags', dest='existing_dags', action='store_false')
parser.set_defaults(existing_dags=True)
# Distribution fitting (observational data)
parser.add_argument('--obs_batch_size', default=128, type=int, help='Batch size used for fitting the graph to observational data')
parser.add_argument('--obs_epochs', default=1000, type=int, help='Number of epochs for fitting the causal structure to observational data')
parser.add_argument('--hidden_dims', default=[64], type=int, nargs='+', help='Number of hidden units in each layer of the Multivariable MLP')
# Optimizers
parser.add_argument('--lr_model', default=5e-3, type=float, help='Learning rate for fitting the model to observational data')
parser.add_argument('--betas_model', default=(0.9,0.999), type=tuple, help='Betas used for Adam optimizer (model fitting)')
parser.add_argument('--lr_gamma', default=2e-2, type=float, help='Learning rate for updating gamma parameters')
parser.add_argument('--betas_gamma', default=(0.9,0.9), type=tuple, help='Betas used for Adam optimizer OR momentum used for SGD (gamma update)')
parser.add_argument('--lr_theta', default=1e-1, type=float, help='Learning rate for updating theta parameters')
parser.add_argument('--betas_theta', default=(0.9,0.999), type=tuple, help='Betas used for Adam Theta optimizer (theta update)')
# Graph fitting (interventional data)
parser.add_argument('--int_batch_size', default=128, type=int, help='Number of samples per intervention')
parser.add_argument('--int_epochs', default=100, type=int, help='Number of epochs for updating the graph gamma and theta parameters of the graph')
parser.add_argument('--int_dist', type=str, nargs='+', default=['uniform'], help='Categorical distribution used for sampling intervention values')
parser.add_argument('--lambda_sparse', default=0.004, type=float, help='Threshold for interpreting an edge as beneficial')
parser.add_argument('--K', default=100, type=int, help='Number of graph samples for gradient estimation')
parser.add_argument('--temp_int', default=[1], type=float, nargs='+', help='Temperature used for distribution of intervention values')
# Reinforcement Learning
parser.add_argument('--max_episodes', default=10000, type=int, help='Maximum number of episodes')
parser.add_argument('--learn_policy', dest='learn_policy', action='store_true')
parser.add_argument('--beta_baseline', default=0.5, type=float, help='Beta used for exponentially weighted baseline average')
parser.set_defaults(learn_policy=True)
args: argparse.Namespace = parser.parse_args()
# test runs to compare different heuristics on the same graphs
if args.full_test:
dags = defaultdict(list)
argparse_dict = vars(args)
with open(datetime.today().strftime('tb_logs/%Y-%m-%d-%H-%M-hparams.json'), 'w') as fp:
json.dump(argparse_dict, fp)
for structure in args.graph_structure:
for i in range(args.num_graphs):
dag = generate_categorical_graph(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
connected=True,
graph_func=get_graph_func(structure),
edge_prob=args.edge_prob,
use_nn=True)
if args.existing_dags:
for root, dirs, files in os.walk('dags'):
if structure not in root:
continue
if f'dag-{i}' not in root:
continue
for file in files:
if 'dag.pt' in file:
path = os.path.join(root, file)
break
else:
continue
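# for/else idiom: the else branch fires only when the inner loop found no
# dag.pt file; reaching the break below means a saved DAG was located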
break
dag = dag.load_from_file(path)
dags[structure].append(dag)
# for heuristic in args.heuristic:
for int_dist in args.int_dist:
if int_dist == 'uniform':
temp_int = [1]
else:
temp_int = args.temp_int
for temperature in temp_int:
for i, dag in enumerate(dags[structure]):
args.log_graph_structure = structure + "-dag-" + str(i) # for logging
args.log_heuristic = 'mlp-policy' # for logging
args.log_temp_int = temperature
args.log_int_dist = int_dist
main(args, dag) | return log_probs_lst, reward_lst
| random_line_split |
main.py | from env import CausalEnv
from metrics import *
from enco_model import *
from enco_training import *
from heuristics import *
from policy import MLP, GAT
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Tuple
from collections import defaultdict
import json
from datetime import datetime
from statistics import mean
import os
from utils import track
from causal_graphs.graph_generation import generate_categorical_graph, get_graph_func
from causal_graphs.graph_definition import CausalDAG
def main(args: argparse.Namespace, dag: CausalDAG=None, policy=None):
"""Executes a causal discovery algorithm on synthetic data from a sampled
DAG, using a specified heuristic for choosing intervention variables.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
"""
# initialize the environment: create a graph and generate observational
# samples from the joint distribution of the graph variables
env = CausalEnv(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
graph_structure=args.graph_structure,
edge_prob=args.edge_prob,
dag=dag)
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# initialize policy learning
if args.learn_policy:
policy = MLP(args.num_variables, [512, 256, 128]).float()
policy = policy.to(device)
policy_optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
rewards_lst = []
for t in range(args.max_episodes):
policy_optimizer.zero_grad()
log_probs, reward = train(args, env, obs_dataloader, device, policy)
reward += [0] * (args.epochs - len(reward))
rewards_lst.append(reward)
baseline = args.beta_baseline * torch.Tensor(reward) + (1 - args.beta_baseline) * baseline if t != 0 else torch.Tensor(reward)
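# NOTE: torch.tensor(log_probs, requires_grad=True) below rebuilds the
# log-probabilities as fresh leaf tensors, detaching them from the policy's
# computation graph; keeping the original policy output tensors (e.g. via
# torch.stack(log_probs)) would be needed for policy_loss.backward() to
# reach the policy parameters.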
policy_loss = -torch.sum((torch.Tensor(reward[:len(log_probs)]) - baseline[:len(log_probs)]) * torch.cumsum(torch.tensor(log_probs, requires_grad=True), dim=0))
policy_loss.backward()
policy_optimizer.step()
print(torch.sum(torch.Tensor(reward)))
print(torch.mean(torch.sum(torch.tensor(rewards_lst), dim=-1)))
if torch.sum(torch.Tensor(reward)) >= max(torch.sum(torch.tensor(rewards_lst), dim=-1)):
print('\nSaving policy...')
torch.save(policy.state_dict(), 'policy_mlp.pth')
else:
|
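# Minimal REINFORCE sketch of the update performed in main() above
# (illustrative only, not part of the original file; it assumes log_probs is
# a list of grad-tracking scalar tensors from the policy and rewards is a
# list of floats of the same length):
def _reinforce_step_sketch(log_probs, rewards, baseline, beta=0.5):
    rewards_t = torch.Tensor(rewards)
    # exponentially weighted baseline: new = beta * reward + (1 - beta) * old
    baseline = rewards_t if baseline is None else beta * rewards_t + (1 - beta) * baseline
    advantage = rewards_t - baseline
    # credit assignment via the running sum of log-probabilities up to each step
    loss = -torch.sum(advantage * torch.cumsum(torch.stack(log_probs), dim=0))
    return loss, baseline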
def train(args, env, obs_dataloader, device, policy=None):
# initialize model of the causal structure
model, adj_matrix = init_model(args, device)
# initialize optimizers
model_optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_model, betas=args.betas_model)
gamma_optimizer = AdamGamma(adj_matrix.gamma, lr=args.lr_gamma, beta1=args.betas_gamma[0], beta2=args.betas_gamma[1])
theta_optimizer = AdamTheta(adj_matrix.theta, lr=args.lr_theta, beta1=args.betas_theta[0], beta2=args.betas_theta[1])
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
int_dists = choose_distribution(args, obs_data)
# initialize CE loss module
loss_module = nn.CrossEntropyLoss()
# initialize Logger
logger = Logger(args)
logger.before_training(adj_matrix, env.dag)
log_probs_lst = []
reward_lst = []
distance = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
# causal discovery training loop
for epoch in track(range(args.epochs), leave=False, desc="Epoch loop"):
# fit model to observational data (distribution fitting)
avg_loss = distribution_fitting(args,
model,
loss_module,
model_optimizer,
adj_matrix,
obs_dataloader)
# graph fitting
log_probs, reward = graph_fitting(args,
adj_matrix,
gamma_optimizer,
theta_optimizer,
model,
env,
epoch,
logger,
int_dists,
policy)
distance_new = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
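# reward shaping: scaled graph-fitting reward plus the decrease in L1
# distance between the learned edge probabilities and the true adjacency matrix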
reward, distance = reward / 10. + (distance - distance_new), distance_new
log_probs_lst.append(log_probs)
reward_lst.append(reward.detach().item())
# logging
stop = logger.on_epoch_end(adj_matrix, torch.from_numpy(env.dag.adj_matrix), epoch)
# stop early if SHD is 0 for 3 epochs
if stop:
break
return log_probs_lst, reward_lst
def init_model(args: argparse.Namespace, device) -> Tuple[MultivarMLP, AdjacencyMatrix]:
"""Initializes a complete model of the causal structure, consisting of a
multivariable MLP which models the conditional distributions of the causal
variables, and gamma and theta values which define the adjacency matrix
of the causal graph.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
Returns:
model: Multivariable MLP (modelling the conditional distributions).
adj_matrix: Adjacency matrix bundling the gamma values (determining
edge probabilities) and theta values (determining edge directions).
"""
model = create_model(num_vars=args.num_variables,
num_categs=args.max_categories,
hidden_dims=args.hidden_dims,
actfn='leakyrelu')
if args.data_parallel:
device = torch.device("cuda:0")
print("Data parallel activated. Using %i GPUs" % torch.cuda.device_count())
model = nn.DataParallel(model)
else:
device = torch.device("cpu")
adj_matrix = AdjacencyMatrix(args.num_variables, device)
return model, adj_matrix
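# Illustrative sketch of the gamma/theta parameterization described in the
# docstring above (the real AdjacencyMatrix is imported from enco_model; this
# class name and the sigmoid-product formula are assumptions, following the
# ENCO parameterization):
class _AdjacencyMatrixSketch:
    def __init__(self, num_vars, device):
        # gamma gates edge existence, theta gates edge orientation
        self.gamma = torch.zeros(num_vars, num_vars, device=device)
        self.theta = torch.zeros(num_vars, num_vars, device=device)
    def edge_probs(self):
        # probability of edge i -> j
        return torch.sigmoid(self.gamma) * torch.sigmoid(self.theta)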
if __name__ == '__main__':
# collect cmd line args
parser = argparse.ArgumentParser()
# Settings
parser.add_argument('--data_parallel', default=True, type=lambda s: s.lower() in ('true', '1'), help='Use parallelization for efficiency') # type=bool would treat any non-empty string as True
parser.add_argument('--num_variables', default=25, type=int, help='Number of causal variables')
parser.add_argument('--min_categories', default=10, type=int, help='Minimum number of categories of a causal variable')
parser.add_argument('--max_categories', default=10, type=int, help='Maximum number of categories of a causal variable')
parser.add_argument('--n_obs_samples', default=100000, type=int, help='Number of observational samples from the joint distribution of a synthetic graph')
parser.add_argument('--epochs', default=30, type=int, help='Maximum number of interventions')
parser.add_argument('--graph_structure', type=str, nargs='+', default=['chain'], help='Structure of the true causal graph')
parser.add_argument('--heuristic', type=str, nargs='+', default=['uniform'], help='Heuristic used for choosing intervention nodes')
parser.add_argument('--temperature', default=10.0, type=float, help='Temperature used for sampling the intervention variable')
parser.add_argument('--full_test', default=True, type=lambda s: s.lower() in ('true', '1'), help='Full test run for comparison of all heuristics (fixed graphs)') # type=bool would treat any non-empty string as True
parser.add_argument('--edge_prob', default=0.4, type=float, help='Edge likelihood for generating a graph') # only used for "random" graph structure
parser.add_argument('--num_graphs', default=1, type=int, help='Number of graphs per structure')
parser.add_argument('--existing_dags', dest='existing_dags', action='store_true')
parser.add_argument('--generate_dags', dest='existing_dags', action='store_false')
parser.set_defaults(existing_dags=True)
# Distribution fitting (observational data)
parser.add_argument('--obs_batch_size', default=128, type=int, help='Batch size used for fitting the graph to observational data')
parser.add_argument('--obs_epochs', default=1000, type=int, help='Number of epochs for fitting the causal structure to observational data')
parser.add_argument('--hidden_dims', default=[64], type=int, nargs='+', help='Number of hidden units in each layer of the Multivariable MLP')
# Optimizers
parser.add_argument('--lr_model', default=5e-3, type=float, help='Learning rate for fitting the model to observational data')
parser.add_argument('--betas_model', default=(0.9,0.999), type=tuple, help='Betas used for Adam optimizer (model fitting)')
parser.add_argument('--lr_gamma', default=2e-2, type=float, help='Learning rate for updating gamma parameters')
parser.add_argument('--betas_gamma', default=(0.9,0.9), type=tuple, help='Betas used for Adam optimizer OR momentum used for SGD (gamma update)')
parser.add_argument('--lr_theta', default=1e-1, type=float, help='Learning rate for updating theta parameters')
parser.add_argument('--betas_theta', default=(0.9,0.999), type=tuple, help='Betas used for Adam Theta optimizer (theta update)')
# Graph fitting (interventional data)
parser.add_argument('--int_batch_size', default=128, type=int, help='Number of samples per intervention')
parser.add_argument('--int_epochs', default=100, type=int, help='Number of epochs for updating the graph gamma and theta parameters of the graph')
parser.add_argument('--int_dist', type=str, nargs='+', default=['uniform'], help='Categorical distribution used for sampling intervention values')
parser.add_argument('--lambda_sparse', default=0.004, type=float, help='Threshold for interpreting an edge as beneficial')
parser.add_argument('--K', default=100, type=int, help='Number of graph samples for gradient estimation')
parser.add_argument('--temp_int', default=[1], type=float, nargs='+', help='Temperature used for distribution of intervention values')
# Reinforcement Learning
parser.add_argument('--max_episodes', default=10000, type=int, help='Maximum number of episodes')
parser.add_argument('--learn_policy', dest='learn_policy', action='store_true')
parser.add_argument('--beta_baseline', default=0.5, type=float, help='Beta used for exponentially weighted baseline average')
parser.set_defaults(learn_policy=True)
args: argparse.Namespace = parser.parse_args()
# test runs to compare different heuristics on the same graphs
if args.full_test:
dags = defaultdict(list)
argparse_dict = vars(args)
with open(datetime.today().strftime('tb_logs/%Y-%m-%d-%H-%M-hparams.json'), 'w') as fp:
json.dump(argparse_dict, fp)
for structure in args.graph_structure:
for i in range(args.num_graphs):
dag = generate_categorical_graph(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
connected=True,
graph_func=get_graph_func(structure),
edge_prob=args.edge_prob,
use_nn=True)
if args.existing_dags:
path = None
for root, dirs, files in os.walk('dags'):
if structure not in root:
continue
if f'dag-{i}' not in root:
continue
for file in files:
if 'dag.pt' in file:
path = os.path.join(root, file)
break
else:
continue
break
if path is not None:  # fall back to the freshly generated DAG if no saved file was found
dag = dag.load_from_file(path)
dags[structure].append(dag)
# for heuristic in args.heuristic:
for int_dist in args.int_dist:
if int_dist == 'uniform':
temp_int = [1]
else:
temp_int = args.temp_int
for temperature in temp_int:
for i, dag in enumerate(dags[structure]):
args.log_graph_structure = structure + "-dag-" + str(i) # for logging
args.log_heuristic = 'mlp-policy' # for logging
args.log_temp_int = temperature
args.log_int_dist = int_dist
main(args, dag) | train(args, env, obs_dataloader, device, policy) | conditional_block |
main.py | from env import CausalEnv
from metrics import *
from enco_model import *
from enco_training import *
from heuristics import *
from policy import MLP, GAT
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Tuple
from collections import defaultdict
import json
from datetime import datetime
from statistics import mean
import os
from utils import track
from causal_graphs.graph_generation import generate_categorical_graph, get_graph_func
from causal_graphs.graph_definition import CausalDAG
def | (args: argparse.Namespace, dag: CausalDAG=None, policy=None):
"""Executes a causal discovery algorithm on synthetic data from a sampled
DAG, using a specified heuristic for choosing intervention variables.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
"""
# initialize the environment: create a graph and generate observational
# samples from the joint distribution of the graph variables
env = CausalEnv(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
graph_structure=args.graph_structure,
edge_prob=args.edge_prob,
dag=dag)
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# initialize policy learning
if args.learn_policy:
policy = MLP(args.num_variables, [512, 256, 128]).float()
policy = policy.to(device)
policy_optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
rewards_lst = []
for t in range(args.max_episodes):
policy_optimizer.zero_grad()
log_probs, reward = train(args, env, obs_dataloader, device, policy)
reward += [0] * (args.epochs - len(reward))
rewards_lst.append(reward)
baseline = args.beta_baseline * torch.Tensor(reward) + (1 - args.beta_baseline) * baseline if t != 0 else torch.Tensor(reward)
policy_loss = -torch.sum((torch.Tensor(reward[:len(log_probs)]) - baseline[:len(log_probs)]) * torch.cumsum(torch.tensor(log_probs, requires_grad=True), dim=0))
policy_loss.backward()
policy_optimizer.step()
print(torch.sum(torch.Tensor(reward)))
print(torch.mean(torch.sum(torch.tensor(rewards_lst), dim=-1)))
if torch.sum(torch.Tensor(reward)) >= max(torch.sum(torch.tensor(rewards_lst), dim=-1)):
print('\nSaving policy...')
torch.save(policy.state_dict(), 'policy_mlp.pth')
else:
train(args, env, obs_dataloader, device, policy)
def train(args, env, obs_dataloader, device, policy=None):
# initialize model of the causal structure
model, adj_matrix = init_model(args, device)
# initialize optimizers
model_optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_model, betas=args.betas_model)
gamma_optimizer = AdamGamma(adj_matrix.gamma, lr=args.lr_gamma, beta1=args.betas_gamma[0], beta2=args.betas_gamma[1])
theta_optimizer = AdamTheta(adj_matrix.theta, lr=args.lr_theta, beta1=args.betas_theta[0], beta2=args.betas_theta[1])
obs_data = env.reset(n_samples=args.n_obs_samples)
obs_dataloader = DataLoader(obs_data, batch_size=args.obs_batch_size, shuffle=True, drop_last=True)
int_dists = choose_distribution(args, obs_data)
# initialize CE loss module
loss_module = nn.CrossEntropyLoss()
# initialize Logger
logger = Logger(args)
logger.before_training(adj_matrix, env.dag)
log_probs_lst = []
reward_lst = []
distance = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
# causal discovery training loop
for epoch in track(range(args.epochs), leave=False, desc="Epoch loop"):
# fit model to observational data (distribution fitting)
avg_loss = distribution_fitting(args,
model,
loss_module,
model_optimizer,
adj_matrix,
obs_dataloader)
# graph fitting
log_probs, reward = graph_fitting(args,
adj_matrix,
gamma_optimizer,
theta_optimizer,
model,
env,
epoch,
logger,
int_dists,
policy)
distance_new = torch.sum(torch.abs(torch.from_numpy(env.dag.adj_matrix).float().to(device) - adj_matrix.edge_probs()))
reward, distance = reward / 10. + (distance - distance_new), distance_new
log_probs_lst.append(log_probs)
reward_lst.append(reward.detach().item())
# logging
stop = logger.on_epoch_end(adj_matrix, torch.from_numpy(env.dag.adj_matrix), epoch)
# stop early if SHD is 0 for 3 epochs
if stop:
break
return log_probs_lst, reward_lst
def init_model(args: argparse.Namespace, device) -> Tuple[MultivarMLP, AdjacencyMatrix]:
"""Initializes a complete model of the causal structure, consisting of a
multivariable MLP which models the conditional distributions of the causal
variables, and gamma and theta values which define the adjacency matrix
of the causal graph.
Args:
args: Object from the argument parser that defines various settings of
the causal structure and discovery process.
Returns:
model: Multivariable MLP (modelling the conditional distributions).
adj_matrix: Adjacency matrix bundling the gamma values (determining
edge probabilities) and theta values (determining edge directions).
"""
model = create_model(num_vars=args.num_variables,
num_categs=args.max_categories,
hidden_dims=args.hidden_dims,
actfn='leakyrelu')
if args.data_parallel:
device = torch.device("cuda:0")
print("Data parallel activated. Using %i GPUs" % torch.cuda.device_count())
model = nn.DataParallel(model)
else:
device = torch.device("cpu")
adj_matrix = AdjacencyMatrix(args.num_variables, device)
return model, adj_matrix
if __name__ == '__main__':
# collect cmd line args
parser = argparse.ArgumentParser()
# Settings
parser.add_argument('--data_parallel', default=True, type=lambda s: s.lower() in ('true', '1'), help='Use parallelization for efficiency') # type=bool would treat any non-empty string as True
parser.add_argument('--num_variables', default=25, type=int, help='Number of causal variables')
parser.add_argument('--min_categories', default=10, type=int, help='Minimum number of categories of a causal variable')
parser.add_argument('--max_categories', default=10, type=int, help='Maximum number of categories of a causal variable')
parser.add_argument('--n_obs_samples', default=100000, type=int, help='Number of observational samples from the joint distribution of a synthetic graph')
parser.add_argument('--epochs', default=30, type=int, help='Maximum number of interventions')
parser.add_argument('--graph_structure', type=str, nargs='+', default=['chain'], help='Structure of the true causal graph')
parser.add_argument('--heuristic', type=str, nargs='+', default=['uniform'], help='Heuristic used for choosing intervention nodes')
parser.add_argument('--temperature', default=10.0, type=float, help='Temperature used for sampling the intervention variable')
parser.add_argument('--full_test', default=True, type=lambda s: s.lower() in ('true', '1'), help='Full test run for comparison of all heuristics (fixed graphs)') # type=bool would treat any non-empty string as True
parser.add_argument('--edge_prob', default=0.4, type=float, help='Edge likelihood for generating a graph') # only used for "random" graph structure
parser.add_argument('--num_graphs', default=1, type=int, help='Number of graphs per structure')
parser.add_argument('--existing_dags', dest='existing_dags', action='store_true')
parser.add_argument('--generate_dags', dest='existing_dags', action='store_false')
parser.set_defaults(existing_dags=True)
# Distribution fitting (observational data)
parser.add_argument('--obs_batch_size', default=128, type=int, help='Batch size used for fitting the graph to observational data')
parser.add_argument('--obs_epochs', default=1000, type=int, help='Number of epochs for fitting the causal structure to observational data')
parser.add_argument('--hidden_dims', default=[64], type=int, nargs='+', help='Number of hidden units in each layer of the Multivariable MLP')
# Optimizers
parser.add_argument('--lr_model', default=5e-3, type=float, help='Learning rate for fitting the model to observational data')
parser.add_argument('--betas_model', default=(0.9,0.999), type=tuple, help='Betas used for Adam optimizer (model fitting)')
parser.add_argument('--lr_gamma', default=2e-2, type=float, help='Learning rate for updating gamma parameters')
parser.add_argument('--betas_gamma', default=(0.9,0.9), type=tuple, help='Betas used for Adam optimizer OR momentum used for SGD (gamma update)')
parser.add_argument('--lr_theta', default=1e-1, type=float, help='Learning rate for updating theta parameters')
parser.add_argument('--betas_theta', default=(0.9,0.999), type=tuple, help='Betas used for Adam Theta optimizer (theta update)')
# Graph fitting (interventional data)
parser.add_argument('--int_batch_size', default=128, type=int, help='Number of samples per intervention')
parser.add_argument('--int_epochs', default=100, type=int, help='Number of epochs for updating the graph gamma and theta parameters of the graph')
parser.add_argument('--int_dist', type=str, nargs='+', default=['uniform'], help='Categorical distribution used for sampling intervention values')
parser.add_argument('--lambda_sparse', default=0.004, type=float, help='Threshold for interpreting an edge as beneficial')
parser.add_argument('--K', default=100, type=int, help='Number of graph samples for gradient estimation')
parser.add_argument('--temp_int', default=[1], type=float, nargs='+', help='Temperature used for distribution of intervention values')
# Reinforcement Learning
parser.add_argument('--max_episodes', default=10000, type=int, help='Maximum number of episodes')
parser.add_argument('--learn_policy', dest='learn_policy', action='store_true')
parser.add_argument('--beta_baseline', default=0.5, type=float, help='Beta used for exponentially weighted baseline average')
parser.set_defaults(learn_policy=True)
args: argparse.Namespace = parser.parse_args()
# test runs to compare different heuristics on the same graphs
if args.full_test:
dags = defaultdict(list)
argparse_dict = vars(args)
with open(datetime.today().strftime('tb_logs/%Y-%m-%d-%H-%M-hparams.json'), 'w') as fp:
json.dump(argparse_dict, fp)
for structure in args.graph_structure:
for i in range(args.num_graphs):
dag = generate_categorical_graph(num_vars=args.num_variables,
min_categs=args.min_categories,
max_categs=args.max_categories,
connected=True,
graph_func=get_graph_func(structure),
edge_prob=args.edge_prob,
use_nn=True)
if args.existing_dags:
path = None
for root, dirs, files in os.walk('dags'):
if structure not in root:
continue
if f'dag-{i}' not in root:
continue
for file in files:
if 'dag.pt' in file:
path = os.path.join(root, file)
break
else:
continue
break
if path is not None:  # fall back to the freshly generated DAG if no saved file was found
dag = dag.load_from_file(path)
dags[structure].append(dag)
# for heuristic in args.heuristic:
for int_dist in args.int_dist:
if int_dist == 'uniform':
temp_int = [1]
else:
temp_int = args.temp_int
for temperature in temp_int:
for i, dag in enumerate(dags[structure]):
args.log_graph_structure = structure + "-dag-" + str(i) # for logging
args.log_heuristic = 'mlp-policy' # for logging
args.log_temp_int = temperature
args.log_int_dist = int_dist
main(args, dag) | main | identifier_name |
srt.rs | use std::{
fmt::{self, Display, Formatter},
{collections::BTreeMap, convert::TryFrom, time::Duration},
};
use bitflags::bitflags;
use bytes::{Buf, BufMut};
use log::warn;
use crate::{options::SrtVersion, packet::PacketParseError};
/// The SRT-specific control packets
/// These are `Packet::Custom` types
#[derive(Clone, Eq, PartialEq)]
pub enum SrtControlPacket {
/// SRT handshake reject
/// ID = 0
Reject,
/// SRT handshake request
/// ID = 1
HandshakeRequest(SrtHandshake),
/// SRT handshake response
/// ID = 2
HandshakeResponse(SrtHandshake),
/// Key manager request
/// ID = 3
KeyRefreshRequest(KeyingMaterialMessage),
/// Key manager response
/// ID = 4
KeyRefreshResponse(KeyingMaterialMessage),
/// Stream identifier
/// ID = 5
StreamId(String),
/// Congestion control type. Often "live" or "file"
/// ID = 6
Congestion(String),
/// ID = 7
/// Filter seems to be a string of
/// comma-separated key-value pairs like:
/// a:b,c:d
Filter(FilterSpec),
// ID = 8
Group {
ty: GroupType,
flags: GroupFlags,
weight: u16,
},
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FilterSpec(pub BTreeMap<String, String>);
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum GroupType {
Undefined,
Broadcast,
MainBackup,
Balancing,
Multicast,
Unrecognized(u8),
}
bitflags! {
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct GroupFlags: u8 {
const MSG_SYNC = 1 << 6;
}
}
/// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L76-L96
/// or https://datatracker.ietf.org/doc/html/draft-sharabayko-srt-00#section-3.2.2
///
/// HaiCrypt KMmsg (Keying Material Message):
///
/// ```ignore,
/// 0 1 2 3
/// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x00 |0|Vers | PT | Sign | resv |KF |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x04 | KEKI |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x08 | Cipher | Auth | SE | Resv1 |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x0C | Resv2 | Slen/4 | Klen/4 |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x10 | Salt |
/// | ... |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// | Wrap |
/// | ... |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// ```
///
#[derive(Clone, Eq, PartialEq)]
pub struct KeyingMaterialMessage {
pub pt: PacketType, // TODO: I think this is always KeyingMaterial...
pub key_flags: KeyFlags,
pub keki: u32,
pub cipher: CipherType,
pub auth: Auth,
pub salt: Vec<u8>,
pub wrapped_keys: Vec<u8>,
}
impl From<GroupType> for u8 {
fn from(from: GroupType) -> u8 {
match from {
GroupType::Undefined => 0,
GroupType::Broadcast => 1,
GroupType::MainBackup => 2,
GroupType::Balancing => 3,
GroupType::Multicast => 4,
GroupType::Unrecognized(u) => u,
}
}
}
impl From<u8> for GroupType {
fn from(from: u8) -> GroupType {
match from {
0 => GroupType::Undefined,
1 => GroupType::Broadcast,
2 => GroupType::MainBackup,
3 => GroupType::Balancing,
4 => GroupType::Multicast,
u => GroupType::Unrecognized(u),
}
}
}
impl fmt::Debug for KeyingMaterialMessage {
fn | (&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyingMaterialMessage")
.field("pt", &self.pt)
.field("key_flags", &self.key_flags)
.field("keki", &self.keki)
.field("cipher", &self.cipher)
.field("auth", &self.auth)
.finish()
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Auth {
None = 0,
}
impl TryFrom<u8> for Auth {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0 => Ok(Auth::None),
e => Err(PacketParseError::BadAuth(e)),
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum StreamEncapsulation {
Udp = 1,
Srt = 2,
}
impl TryFrom<u8> for StreamEncapsulation {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
Ok(match value {
1 => StreamEncapsulation::Udp,
2 => StreamEncapsulation::Srt,
e => return Err(PacketParseError::BadStreamEncapsulation(e)),
})
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
// see hcrypt_msg.h:43...
// 7: Reserved to discriminate MPEG-TS packet (0x47=sync byte).
pub enum PacketType {
MediaStream = 1, // Media Stream Message (MSmsg)
KeyingMaterial = 2, // Keying Material Message (KMmsg)
}
bitflags! {
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct KeyFlags : u8 {
const EVEN = 0b01;
const ODD = 0b10;
}
}
impl TryFrom<u8> for PacketType {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
1 => Ok(PacketType::MediaStream),
2 => Ok(PacketType::KeyingMaterial),
err => Err(PacketParseError::BadKeyPacketType(err)),
}
}
}
/// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L121-L124
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum CipherType {
None = 0,
Ecb = 1,
Ctr = 2,
Cbc = 3,
}
/// The SRT handshake object
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct SrtHandshake {
/// The SRT version
/// Serialized just as the u32 that SrtVersion serialized to
pub version: SrtVersion,
/// SRT connection init flags
pub flags: SrtShakeFlags,
/// The peer's TSBPD latency (latency to send at)
/// This is serialized as the upper 16 bits of the third 32-bit word
/// source: https://github.com/Haivision/srt/blob/4f7f2beb2e1e306111b9b11402049a90cb6d3787/srtcore/core.cpp#L1341-L1353
pub send_latency: Duration,
/// The TSBPD latency (latency to recv at)
/// This is serialized as the lower 16 bits of the third 32-bit word
/// see csrtcc.cpp:132 in the reference implementation
pub recv_latency: Duration,
}
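// Sketch of the third handshake word implied by the latency comments above
// (illustrative only): word3 = (send_latency_ms << 16) | recv_latency_ms,
// with both values truncated to 16 bits of milliseconds.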
bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SrtShakeFlags: u32 {
/// Timestamp-based Packet delivery real-time data sender
const TSBPDSND = 0x1;
/// Timestamp-based Packet delivery real-time data receiver
const TSBPDRCV = 0x2;
/// HaiCrypt AES-128/192/256-CTR
/// also represents if it supports the encryption flags in the data packet
const HAICRYPT = 0x4;
/// Drop real-time data packets too late to be processed in time
const TLPKTDROP = 0x8;
/// Periodic NAK report
const NAKREPORT = 0x10;
/// One bit in payload packet msgno is "retransmitted" flag
const REXMITFLG = 0x20;
/// This entity supports stream ID packets
const STREAM = 0x40;
/// Again not sure... TODO:
const PACKET_FILTER = 0x80;
// currently implemented flags
const SUPPORTED = Self::TSBPDSND.bits() | Self::TSBPDRCV.bits() | Self::HAICRYPT.bits() | Self::REXMITFLG.bits();
}
}
fn le_bytes_to_string(le_bytes: &mut impl Buf) -> Result<String, PacketParseError> {
if le_bytes.remaining() == 0 {
return Ok(String::new()); // avoid an underflow panic on an empty extension
}
if le_bytes.remaining() % 4 != 0 {
return Err(PacketParseError::NotEnoughData);
}
let mut str_bytes = Vec::with_capacity(le_bytes.remaining());
while le_bytes.remaining() > 4 {
str_bytes.extend(le_bytes.get_u32_le().to_be_bytes());
}
// make sure to skip padding bytes if any for the last word
match le_bytes.get_u32_le().to_be_bytes() {
[a, 0, 0, 0] => str_bytes.push(a),
[a, b, 0, 0] => str_bytes.extend([a, b]),
[a, b, c, 0] => str_bytes.extend([a, b, c]),
[a, b, c, d] => str_bytes.extend([a, b, c, d]),
}
String::from_utf8(str_bytes).map_err(|e| PacketParseError::StreamTypeNotUtf8(e.utf8_error()))
}
fn string_to_le_bytes(str: &str, into: &mut impl BufMut) {
let mut chunks = str.as_bytes().chunks_exact(4);
while let Some(&[a, b, c, d]) = chunks.next() {
into.put(&[d, c, b, a][..]);
}
// add padding bytes for the final word if needed
match *chunks.remainder() {
[a, b, c] => into.put(&[0, c, b, a][..]),
[a, b] => into.put(&[0, 0, b, a][..]),
[a] => into.put(&[0, 0, 0, a][..]),
[] => {} // exact multiple of 4
_ => unreachable!(),
}
}
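// Round-trip sketch for the 32-bit little-endian word string codec above
// (an illustrative test, not part of the original file):
#[cfg(test)]
mod le_string_roundtrip_sketch {
    use super::*;

    #[test]
    fn roundtrip() {
        for s in ["a", "ab", "abc", "abcd", "hello world"] {
            let mut buf = Vec::new();
            string_to_le_bytes(s, &mut buf);
            assert_eq!(buf.len() % 4, 0); // always whole words, zero-padded
            assert_eq!(le_bytes_to_string(&mut &buf[..]).unwrap(), s);
        }
    }
}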
impl Display for FilterSpec {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
for (i, (k, v)) in self.0.iter().enumerate() {
write!(f, "{k}:{v}")?;
if i != self.0.len() - 1 {
write!(f, ",")?;
}
}
Ok(())
}
}
impl SrtControlPacket {
pub fn parse<T: Buf>(
packet_type: u16,
buf: &mut T,
) -> Result<SrtControlPacket, PacketParseError> {
use self::SrtControlPacket::*;
match packet_type {
0 => Ok(Reject),
1 => Ok(HandshakeRequest(SrtHandshake::parse(buf)?)),
2 => Ok(HandshakeResponse(SrtHandshake::parse(buf)?)),
3 => Ok(KeyRefreshRequest(KeyingMaterialMessage::parse(buf)?)),
4 => Ok(KeyRefreshResponse(KeyingMaterialMessage::parse(buf)?)),
5 => {
// the stream id string is stored as 32-bit little endian words
// https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3
le_bytes_to_string(buf).map(StreamId)
}
6 => le_bytes_to_string(buf).map(Congestion),
// Filter
7 => {
let filter_str = le_bytes_to_string(buf)?;
Ok(Filter(FilterSpec(
filter_str
.split(',')
.map(|kv| {
let mut colon_split_iter = kv.split(':');
let k = colon_split_iter
.next()
.ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?;
let v = colon_split_iter
.next()
.ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?;
// only one colon
if colon_split_iter.next().is_some() {
return Err(PacketParseError::BadFilter(filter_str.clone()));
}
Ok((k.to_string(), v.to_string()))
})
.collect::<Result<_, _>>()?,
)))
}
8 => {
let ty = buf.get_u8().into();
let flags = GroupFlags::from_bits_truncate(buf.get_u8());
let weight = buf.get_u16_le();
Ok(Group { ty, flags, weight })
}
_ => Err(PacketParseError::UnsupportedSrtExtensionType(packet_type)),
}
}
/// Get the value to fill the reserved area with
pub fn type_id(&self) -> u16 {
use self::SrtControlPacket::*;
match self {
Reject => 0,
HandshakeRequest(_) => 1,
HandshakeResponse(_) => 2,
KeyRefreshRequest(_) => 3,
KeyRefreshResponse(_) => 4,
StreamId(_) => 5,
Congestion(_) => 6,
Filter(_) => 7,
Group { .. } => 8,
}
}
pub fn serialize<T: BufMut>(&self, into: &mut T) {
use self::SrtControlPacket::*;
match self {
HandshakeRequest(s) | HandshakeResponse(s) => {
s.serialize(into);
}
KeyRefreshRequest(k) | KeyRefreshResponse(k) => {
k.serialize(into);
}
Filter(filter) => {
string_to_le_bytes(&format!("{filter}"), into);
}
Group { ty, flags, weight } => {
into.put_u8((*ty).into());
into.put_u8(flags.bits());
into.put_u16_le(*weight);
}
Reject => {}
StreamId(str) | Congestion(str) => {
// the stream id string and congestion string is stored as 32-bit little endian words
// https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3
string_to_le_bytes(str, into);
}
}
}
// size in 32-bit words
pub fn size_words(&self) -> u16 {
use self::SrtControlPacket::*;
match self {
// 3 32-bit words, version, flags, latency
HandshakeRequest(_) | HandshakeResponse(_) => 3,
// 4 32-bit words + salt + key + wrap [2]
KeyRefreshRequest(ref k) | KeyRefreshResponse(ref k) => {
4 + k.salt.len() as u16 / 4 + k.wrapped_keys.len() as u16 / 4
}
Congestion(str) | StreamId(str) => ((str.len() + 3) / 4) as u16, // round up to nearest multiple of 4
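// e.g. a 5-byte string occupies (5 + 3) / 4 = 2 words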
// 1 32-bit word packed with type, flags, and weight
Group { .. } => 1,
Filter(filter) => ((format!("{filter}").len() + 3) / 4) as u16, // TODO: not optimal performance, but probably okay
_ => unimplemented!("{:?}", self),
}
}
}
impl SrtHandshake {
pub fn parse<T: Buf>(buf: &mut T) -> Result<SrtHandshake, PacketParseError> {
if buf.remaining() < 12 {
return Err(PacketParseError::NotEnoughData);
}
let version = SrtVersion::parse(buf.get_u32());
let shake_flags = buf.get_u32();
let flags = match SrtShakeFlags::from_bits(shake_flags) {
Some(i) => i,
None => {
warn!("Unrecognized SRT flags: 0b{:b}", shake_flags);
SrtShakeFlags::from_bits_truncate(shake_flags)
}
};
let peer_latency = buf.get_u16();
let latency = buf.get_u16();
Ok(SrtHandshake {
version,
flags,
send_latency: Duration::from_millis(u64::from(peer_latency)),
recv_latency: Duration::from_millis(u64::from(latency)),
})
}
pub fn serialize<T: BufMut>(&self, into: &mut T) {
into.put_u32(self.version.to_u32());
into.put_u32(self.flags.bits());
// upper 16 bits are peer latency
into.put_u16(self.send_latency.as_millis() as u16); // TODO: handle overflow
// lower 16 is latency
into.put_u16(self.recv_latency.as_millis() as u16); // TODO: handle overflow
}
}
impl KeyingMaterialMessage {
// from hcrypt_msg.h:39
// also const traits aren't a thing yet, so u16::from can't be used
const SIGN: u16 =
((b'H' - b'@') as u16) << 10 | ((b'A' - b'@') as u16) << 5 | (b'I' - b'@') as u16;
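// e.g. H=8, A=1, I=9 packed as 5-bit letters: (8 << 10) | (1 << 5) | 9 == 0x2029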
pub fn parse(buf: &mut impl Buf) -> Result<KeyingMaterialMessage, PacketParseError> {
// first 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// |0|Vers | PT | Sign | resv |KF |
// make sure there is enough data left in the buffer to at least get to the key flags and length, which tells us how long the packet will be
// that's 4x32bit words
if buf.remaining() < 4 * 4 {
return Err(PacketParseError::NotEnoughData);
}
let vers_pt = buf.get_u8();
// make sure the first bit is zero
if (vers_pt & 0b1000_0000) != 0 {
return Err(PacketParseError::BadSrtExtensionMessage);
}
// upper 4 bits are version
let version = vers_pt >> 4;
if version != 1 {
return Err(PacketParseError::BadSrtExtensionMessage);
}
// lower 4 bits are pt
let pt = PacketType::try_from(vers_pt & 0b0000_1111)?;
// next 16 bits are the sign
let sign = buf.get_u16();
if sign != Self::SIGN {
return Err(PacketParseError::BadKeySign(sign));
}
// next 6 bits are reserved, then two bits of KF
let key_flags = KeyFlags::from_bits_truncate(buf.get_u8() & 0b0000_0011);
// second 32-bit word: keki
let keki = buf.get_u32();
// third 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Cipher | Auth | SE | Resv1 |
let cipher = CipherType::try_from(buf.get_u8())?;
let auth = Auth::try_from(buf.get_u8())?;
let se = StreamEncapsulation::try_from(buf.get_u8())?;
if se != StreamEncapsulation::Srt {
return Err(PacketParseError::StreamEncapsulationNotSrt);
}
let _resv1 = buf.get_u8();
// fourth 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Resv2 | Slen/4 | Klen/4 |
let _resv2 = buf.get_u16();
let salt_len = usize::from(buf.get_u8()) * 4;
let key_len = usize::from(buf.get_u8()) * 4;
// acceptable key lengths are 16, 24, and 32
match key_len {
// OK
16 | 24 | 32 => {}
// not
e => return Err(PacketParseError::BadCryptoLength(e as u32)),
}
// get the size of the packet to make sure that there is enough space
// salt + keys: one key per set bit in key_flags (already masked with 0b11, so at most 2), plus 8 bytes of wrap data
if buf.remaining() < salt_len + key_len * (key_flags.bits().count_ones() as usize) + 8 {
return Err(PacketParseError::NotEnoughData);
}
// the reference implementation converts the whole thing to network order (big endian) (in 32-bit words)
// so we need to make sure to do the same. Source:
// https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115
// after this, is the salt
let mut salt = vec![];
for _ in 0..salt_len / 4 {
salt.extend_from_slice(&buf.get_u32().to_be_bytes()[..]);
}
// then key[s]
let mut wrapped_keys = vec![];
for _ in 0..(key_len * key_flags.bits().count_ones() as usize + 8) / 4 {
wrapped_keys.extend_from_slice(&buf.get_u32().to_be_bytes()[..]);
}
Ok(KeyingMaterialMessage {
pt,
key_flags,
keki,
cipher,
auth,
salt,
wrapped_keys,
})
}
fn serialize<T: BufMut>(&self, into: &mut T) {
// first 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// |0|Vers | PT | Sign | resv |KF |
// version is 1
into.put_u8(1 << 4 | self.pt as u8);
into.put_u16(Self::SIGN);
// rightmost bit of KF is even, other is odd
into.put_u8(self.key_flags.bits());
// second 32-bit word: keki
into.put_u32(self.keki);
// third 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Cipher | Auth | SE | Resv1 |
into.put_u8(self.cipher as u8);
into.put_u8(self.auth as u8);
into.put_u8(StreamEncapsulation::Srt as u8);
into.put_u8(0); // resv1
// fourth 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Resv2 | Slen/4 | Klen/4 |
into.put_u16(0); // resv2
into.put_u8((self.salt.len() / 4) as u8);
// recover the per-key length: wrapped_keys holds the 8-byte wrap header plus one key per key_flags bit
let key_len = (self.wrapped_keys.len() - 8) / self.key_flags.bits().count_ones() as usize;
into.put_u8((key_len / 4) as u8);
// put the salt then key[s]
into.put(&self.salt[..]);
// the reference implementation converts the whole thing to network order (big endian) (in 32-bit words)
// so we need to make sure to do the same. Source:
// https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115
for num in self.wrapped_keys[..].chunks(4) {
into.put_u32(u32::from_be_bytes([num[0], num[1], num[2], num[3]]));
}
}
}
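// Round-trip sketch for the keying material codec above (an illustrative
// test, not part of the original file; the sample values mirror those in
// srt_key_message_debug: a 16-byte salt and an 8-byte wrap plus one
// 16-byte key for the single EVEN flag):
#[cfg(test)]
mod km_roundtrip_sketch {
    use super::*;

    #[test]
    fn roundtrip() {
        let km = KeyingMaterialMessage {
            pt: PacketType::KeyingMaterial,
            key_flags: KeyFlags::EVEN,
            keki: 0,
            cipher: CipherType::Ctr,
            auth: Auth::None,
            salt: vec![0; 16],
            wrapped_keys: vec![7; 24], // 8-byte wrap header + one 16-byte key
        };
        let mut buf = Vec::new();
        km.serialize(&mut buf);
        let parsed = KeyingMaterialMessage::parse(&mut &buf[..]).unwrap();
        assert_eq!(km, parsed);
    }
}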
impl fmt::Debug for SrtControlPacket {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SrtControlPacket::Reject => write!(f, "reject"),
SrtControlPacket::HandshakeRequest(req) => write!(f, "hsreq={req:?}"),
SrtControlPacket::HandshakeResponse(resp) => write!(f, "hsresp={resp:?}"),
SrtControlPacket::KeyRefreshRequest(req) => write!(f, "kmreq={req:?}"),
SrtControlPacket::KeyRefreshResponse(resp) => write!(f, "kmresp={resp:?}"),
SrtControlPacket::StreamId(sid) => write!(f, "streamid={sid}"),
SrtControlPacket::Congestion(ctype) => write!(f, "congestion={ctype}"),
SrtControlPacket::Filter(filter) => write!(f, "filter={filter:?}"),
SrtControlPacket::Group { ty, flags, weight } => {
write!(f, "group=({ty:?}, {flags:?}, {weight:?})")
}
}
}
}
impl TryFrom<u8> for CipherType {
type Error = PacketParseError;
fn try_from(from: u8) -> Result<CipherType, PacketParseError> {
match from {
0 => Ok(CipherType::None),
1 => Ok(CipherType::Ecb),
2 => Ok(CipherType::Ctr),
3 => Ok(CipherType::Cbc),
e => Err(PacketParseError::BadCipherKind(e)),
}
}
}
#[cfg(test)]
mod tests {
use super::{KeyingMaterialMessage, SrtControlPacket, SrtHandshake, SrtShakeFlags};
use crate::{options::*, packet::*};
use std::{io::Cursor, time::Duration};
#[test]
fn deser_ser_shake() {
let handshake = Packet::Control(ControlPacket {
timestamp: TimeStamp::from_micros(123_141),
dest_sockid: SocketId(123),
control_type: ControlTypes::Srt(SrtControlPacket::HandshakeRequest(SrtHandshake {
version: SrtVersion::CURRENT,
flags: SrtShakeFlags::empty(),
send_latency: Duration::from_millis(4000),
recv_latency: Duration::from_millis(3000),
})),
});
let mut buf = Vec::new();
handshake.serialize(&mut buf);
let deserialized = Packet::parse(&mut Cursor::new(buf), false).unwrap();
assert_eq!(handshake, deserialized);
}
#[test]
fn ser_deser_sid() {
let sid = Packet::Control(ControlPacket {
timestamp: TimeStamp::from_micros(123),
dest_sockid: SocketId(1234),
control_type: ControlTypes::Srt(SrtControlPacket::StreamId("Hellohelloheloo".into())),
});
let mut buf = Vec::new();
sid.serialize(&mut buf);
let deser = Packet::parse(&mut Cursor::new(buf), false).unwrap();
assert_eq!(sid, deser);
}
#[test]
fn srt_key_message_debug() {
let salt = b"\x00\x00\x00\x00\x00\x00\x00\x00\x85\x2c\x3c\xcd\x02\x65\x1a\x22";
let wrapped = b"U\x06\xe9\xfd\xdfd\xf1'nr\xf4\xe9f\x81#(\xb7\xb5D\x19{\x9b\xcdx";
let km = KeyingMaterialMessage {
pt: PacketType::KeyingMaterial,
key_flags: KeyFlags::EVEN,
keki: 0,
cipher: CipherType::Ctr,
auth: Auth::None,
salt: salt[..].into(),
wrapped_keys: wrapped[..].into(),
};
assert_eq!(format!("{km:?}"), "KeyingMaterialMessage { pt: KeyingMaterial, key_flags: KeyFlags(EVEN), keki: 0, cipher: Ctr, auth: None }")
}
}
| fmt | identifier_name |
srt.rs | use std::{
fmt::{self, Display, Formatter},
{collections::BTreeMap, convert::TryFrom, time::Duration},
};
use bitflags::bitflags;
use bytes::{Buf, BufMut};
use log::warn;
use crate::{options::SrtVersion, packet::PacketParseError};
/// The SRT-specific control packets
/// These are `Packet::Custom` types
#[derive(Clone, Eq, PartialEq)]
pub enum SrtControlPacket {
/// SRT handshake reject
/// ID = 0
Reject,
/// SRT handshake request
/// ID = 1
HandshakeRequest(SrtHandshake),
/// SRT handshake response
/// ID = 2
HandshakeResponse(SrtHandshake),
/// Key manager request
/// ID = 3
KeyRefreshRequest(KeyingMaterialMessage),
/// Key manager response
/// ID = 4
KeyRefreshResponse(KeyingMaterialMessage),
/// Stream identifier
/// ID = 5
StreamId(String),
/// Congestion control type. Often "live" or "file"
/// ID = 6
Congestion(String),
/// ID = 7
/// Filter seems to be a string of
/// comma-separated key-value pairs like:
/// a:b,c:d
Filter(FilterSpec),
// ID = 8
Group {
ty: GroupType,
flags: GroupFlags,
weight: u16,
},
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FilterSpec(pub BTreeMap<String, String>);
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum GroupType {
Undefined,
Broadcast,
MainBackup,
Balancing,
Multicast,
Unrecognized(u8),
}
bitflags! {
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct GroupFlags: u8 {
const MSG_SYNC = 1 << 6;
}
}
/// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L76-L96
/// or https://datatracker.ietf.org/doc/html/draft-sharabayko-srt-00#section-3.2.2
///
/// HaiCrypt KMmsg (Keying Material Message):
///
/// ```ignore,
/// 0 1 2 3
/// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x00 |0|Vers | PT | Sign | resv |KF |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x04 | KEKI |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x08 | Cipher | Auth | SE | Resv1 |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x0C | Resv2 | Slen/4 | Klen/4 |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x10 | Salt |
/// | ... |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// | Wrap |
/// | ... |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// ```
///
#[derive(Clone, Eq, PartialEq)]
pub struct KeyingMaterialMessage {
pub pt: PacketType, // TODO: I think this is always KeyingMaterial...
pub key_flags: KeyFlags,
pub keki: u32,
pub cipher: CipherType,
pub auth: Auth,
pub salt: Vec<u8>,
pub wrapped_keys: Vec<u8>,
}
impl From<GroupType> for u8 {
fn from(from: GroupType) -> u8 {
match from {
GroupType::Undefined => 0,
GroupType::Broadcast => 1,
GroupType::MainBackup => 2,
GroupType::Balancing => 3,
GroupType::Multicast => 4,
GroupType::Unrecognized(u) => u,
}
}
}
impl From<u8> for GroupType {
fn from(from: u8) -> GroupType {
match from {
0 => GroupType::Undefined,
1 => GroupType::Broadcast,
2 => GroupType::MainBackup,
3 => GroupType::Balancing,
4 => GroupType::Multicast,
u => GroupType::Unrecognized(u),
}
}
}
impl fmt::Debug for KeyingMaterialMessage {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyingMaterialMessage")
.field("pt", &self.pt)
.field("key_flags", &self.key_flags)
.field("keki", &self.keki)
.field("cipher", &self.cipher)
.field("auth", &self.auth)
.finish()
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Auth {
None = 0,
}
impl TryFrom<u8> for Auth {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0 => Ok(Auth::None),
e => Err(PacketParseError::BadAuth(e)),
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum StreamEncapsulation {
Udp = 1,
Srt = 2,
}
impl TryFrom<u8> for StreamEncapsulation {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
Ok(match value {
1 => StreamEncapsulation::Udp,
2 => StreamEncapsulation::Srt,
e => return Err(PacketParseError::BadStreamEncapsulation(e)),
})
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
// see hcrypt_msg.h:43...
// 7: Reserved to discriminate MPEG-TS packet (0x47=sync byte).
pub enum PacketType {
MediaStream = 1, // Media Stream Message (MSmsg)
KeyingMaterial = 2, // Keying Material Message (KMmsg)
}
bitflags! {
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct KeyFlags : u8 {
const EVEN = 0b01;
const ODD = 0b10;
}
}
impl TryFrom<u8> for PacketType {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
1 => Ok(PacketType::MediaStream),
2 => Ok(PacketType::KeyingMaterial),
err => Err(PacketParseError::BadKeyPacketType(err)),
}
}
}
/// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L121-L124
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum CipherType {
None = 0,
Ecb = 1,
Ctr = 2,
Cbc = 3,
}
/// The SRT handshake object
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct SrtHandshake {
/// The SRT version
/// Serialized just as the u32 that SrtVersion serialized to
pub version: SrtVersion,
/// SRT connection init flags
pub flags: SrtShakeFlags,
/// The peer's TSBPD latency (latency to send at)
/// This is serialized as the upper 16 bits of the third 32-bit word
/// source: https://github.com/Haivision/srt/blob/4f7f2beb2e1e306111b9b11402049a90cb6d3787/srtcore/core.cpp#L1341-L1353
pub send_latency: Duration,
/// The TSBPD latency (latency to recv at)
/// This is serialized as the lower 16 bits of the third 32-bit word
/// see csrtcc.cpp:132 in the reference implementation
pub recv_latency: Duration,
}
bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SrtShakeFlags: u32 {
/// Timestamp-based Packet delivery real-time data sender
const TSBPDSND = 0x1;
/// Timestamp-based Packet delivery real-time data receiver
const TSBPDRCV = 0x2;
/// HaiCrypt AES-128/192/256-CTR
/// also represents if it supports the encryption flags in the data packet
const HAICRYPT = 0x4;
/// Drop real-time data packets too late to be processed in time
const TLPKTDROP = 0x8;
/// Periodic NAK report
const NAKREPORT = 0x10;
/// One bit in payload packet msgno is "retransmitted" flag
const REXMITFLG = 0x20;
/// This entity supports stream ID packets
const STREAM = 0x40;
/// Again not sure... TODO:
const PACKET_FILTER = 0x80;
// currently implemented flags
const SUPPORTED = Self::TSBPDSND.bits() | Self::TSBPDRCV.bits() | Self::HAICRYPT.bits() | Self::REXMITFLG.bits();
}
}
fn le_bytes_to_string(le_bytes: &mut impl Buf) -> Result<String, PacketParseError> {
if le_bytes.remaining() == 0 {
return Ok(String::new()); // avoid an underflow panic on an empty extension
}
if le_bytes.remaining() % 4 != 0 {
return Err(PacketParseError::NotEnoughData);
}
let mut str_bytes = Vec::with_capacity(le_bytes.remaining());
while le_bytes.remaining() > 4 {
str_bytes.extend(le_bytes.get_u32_le().to_be_bytes());
}
// make sure to skip padding bytes if any for the last word
match le_bytes.get_u32_le().to_be_bytes() {
[a, 0, 0, 0] => str_bytes.push(a),
[a, b, 0, 0] => str_bytes.extend([a, b]),
[a, b, c, 0] => str_bytes.extend([a, b, c]),
[a, b, c, d] => str_bytes.extend([a, b, c, d]),
}
String::from_utf8(str_bytes).map_err(|e| PacketParseError::StreamTypeNotUtf8(e.utf8_error()))
}
fn string_to_le_bytes(str: &str, into: &mut impl BufMut) {
let mut chunks = str.as_bytes().chunks_exact(4);
while let Some(&[a, b, c, d]) = chunks.next() {
into.put(&[d, c, b, a][..]);
}
// add padding bytes for the final word if needed
match *chunks.remainder() {
[a, b, c] => into.put(&[0, c, b, a][..]),
[a, b] => into.put(&[0, 0, b, a][..]),
[a] => into.put(&[0, 0, 0, a][..]),
[] => {} // exact multiple of 4
_ => unreachable!(),
}
}
impl Display for FilterSpec {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
for (i, (k, v)) in self.0.iter().enumerate() {
write!(f, "{k}:{v}")?;
if i != self.0.len() - 1 {
write!(f, ",")?;
}
}
Ok(())
}
}
impl SrtControlPacket {
pub fn parse<T: Buf>(
packet_type: u16,
buf: &mut T,
) -> Result<SrtControlPacket, PacketParseError> {
use self::SrtControlPacket::*;
match packet_type {
0 => Ok(Reject),
1 => Ok(HandshakeRequest(SrtHandshake::parse(buf)?)),
2 => Ok(HandshakeResponse(SrtHandshake::parse(buf)?)),
3 => Ok(KeyRefreshRequest(KeyingMaterialMessage::parse(buf)?)),
4 => Ok(KeyRefreshResponse(KeyingMaterialMessage::parse(buf)?)),
5 => {
// the stream id string is stored as 32-bit little endian words
// https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3
le_bytes_to_string(buf).map(StreamId)
}
6 => le_bytes_to_string(buf).map(Congestion),
// Filter
7 => {
let filter_str = le_bytes_to_string(buf)?;
Ok(Filter(FilterSpec(
filter_str
.split(',')
.map(|kv| {
let mut colon_split_iter = kv.split(':');
let k = colon_split_iter
.next()
.ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?;
let v = colon_split_iter
.next()
.ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?;
// only one colon
if colon_split_iter.next().is_some() {
return Err(PacketParseError::BadFilter(filter_str.clone()));
}
Ok((k.to_string(), v.to_string()))
})
.collect::<Result<_, _>>()?,
)))
}
8 => {
let ty = buf.get_u8().into();
let flags = GroupFlags::from_bits_truncate(buf.get_u8());
let weight = buf.get_u16_le();
Ok(Group { ty, flags, weight })
}
_ => Err(PacketParseError::UnsupportedSrtExtensionType(packet_type)),
}
}
/// Get the value to fill the reserved area with
pub fn type_id(&self) -> u16 {
use self::SrtControlPacket::*;
match self {
Reject => 0,
HandshakeRequest(_) => 1,
HandshakeResponse(_) => 2,
KeyRefreshRequest(_) => 3,
KeyRefreshResponse(_) => 4,
StreamId(_) => 5,
Congestion(_) => 6,
Filter(_) => 7,
Group { .. } => 8,
}
}
pub fn serialize<T: BufMut>(&self, into: &mut T) {
use self::SrtControlPacket::*;
match self {
HandshakeRequest(s) | HandshakeResponse(s) => {
s.serialize(into);
}
KeyRefreshRequest(k) | KeyRefreshResponse(k) => {
k.serialize(into);
}
Filter(filter) => {
string_to_le_bytes(&format!("{filter}"), into);
}
Group { ty, flags, weight } => {
into.put_u8((*ty).into());
into.put_u8(flags.bits());
into.put_u16_le(*weight);
}
Reject => {}
StreamId(str) | Congestion(str) => {
// the stream id string and congestion string is stored as 32-bit little endian words
// https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3
string_to_le_bytes(str, into);
}
}
}
// size in 32-bit words
pub fn size_words(&self) -> u16 {
use self::SrtControlPacket::*;
match self {
// 3 32-bit words, version, flags, latency
HandshakeRequest(_) | HandshakeResponse(_) => 3,
// 4 32-bit words + salt + key + wrap [2]
KeyRefreshRequest(ref k) | KeyRefreshResponse(ref k) => {
4 + k.salt.len() as u16 / 4 + k.wrapped_keys.len() as u16 / 4
}
Congestion(str) | StreamId(str) => ((str.len() + 3) / 4) as u16, // round up to nearest multiple of 4
// 1 32-bit word packed with type, flags, and weight
Group { .. } => 1,
Filter(filter) => ((format!("{filter}").len() + 3) / 4) as u16, // TODO: not optimal performance, but probably okay
_ => unimplemented!("{:?}", self),
}
}
}
impl SrtHandshake {
pub fn parse<T: Buf>(buf: &mut T) -> Result<SrtHandshake, PacketParseError> {
if buf.remaining() < 12 {
return Err(PacketParseError::NotEnoughData);
}
let version = SrtVersion::parse(buf.get_u32());
let shake_flags = buf.get_u32();
let flags = match SrtShakeFlags::from_bits(shake_flags) {
Some(i) => i,
None => {
warn!("Unrecognized SRT flags: 0b{:b}", shake_flags);
SrtShakeFlags::from_bits_truncate(shake_flags)
}
};
let peer_latency = buf.get_u16();
let latency = buf.get_u16();
Ok(SrtHandshake {
version,
flags,
send_latency: Duration::from_millis(u64::from(peer_latency)),
recv_latency: Duration::from_millis(u64::from(latency)),
})
}
pub fn serialize<T: BufMut>(&self, into: &mut T) {
into.put_u32(self.version.to_u32());
into.put_u32(self.flags.bits());
// upper 16 bits are peer latency
into.put_u16(self.send_latency.as_millis() as u16); // TODO: handle overflow
// lower 16 is latency
into.put_u16(self.recv_latency.as_millis() as u16); // TODO: handle overflow
}
}
impl KeyingMaterialMessage {
// from hcrypt_msg.h:39
// also const traits aren't a thing yet, so u16::from can't be used
const SIGN: u16 =
((b'H' - b'@') as u16) << 10 | ((b'A' - b'@') as u16) << 5 | (b'I' - b'@') as u16;
pub fn parse(buf: &mut impl Buf) -> Result<KeyingMaterialMessage, PacketParseError> {
// first 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// |0|Vers | PT | Sign | resv |KF |
// make sure there is enough data left in the buffer to at least get to the key flags and length, which tells us how long the packet will be
// that's 4x32bit words
if buf.remaining() < 4 * 4 {
return Err(PacketParseError::NotEnoughData);
}
let vers_pt = buf.get_u8();
// make sure the first bit is zero
if (vers_pt & 0b1000_0000) != 0 {
return Err(PacketParseError::BadSrtExtensionMessage);
}
// upper 4 bits are version
let version = vers_pt >> 4;
if version != 1 {
return Err(PacketParseError::BadSrtExtensionMessage);
}
// lower 4 bits are pt
let pt = PacketType::try_from(vers_pt & 0b0000_1111)?;
// next 16 bits are the sign
let sign = buf.get_u16();
if sign != Self::SIGN {
return Err(PacketParseError::BadKeySign(sign));
}
// next 6 bits are reserved, then two bits of KF
let key_flags = KeyFlags::from_bits_truncate(buf.get_u8() & 0b0000_0011);
// second 32-bit word: keki
let keki = buf.get_u32();
// third 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Cipher | Auth | SE | Resv1 |
let cipher = CipherType::try_from(buf.get_u8())?;
let auth = Auth::try_from(buf.get_u8())?;
let se = StreamEncapsulation::try_from(buf.get_u8())?;
if se != StreamEncapsulation::Srt {
return Err(PacketParseError::StreamEncapsulationNotSrt);
}
let _resv1 = buf.get_u8();
// fourth 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Resv2 | Slen/4 | Klen/4 |
let _resv2 = buf.get_u16();
let salt_len = usize::from(buf.get_u8()) * 4;
let key_len = usize::from(buf.get_u8()) * 4;
// acceptable key lengths are 16, 24, and 32
match key_len {
// OK
16 | 24 | 32 => {}
// not
e => return Err(PacketParseError::BadCryptoLength(e as u32)),
}
// get the size of the packet to make sure that there is enough space
// salt + keys: one key per set bit in key_flags (already masked with 0b11, so at most 2), plus 8 bytes of wrap data
if buf.remaining() < salt_len + key_len * (key_flags.bits().count_ones() as usize) + 8 {
return Err(PacketParseError::NotEnoughData);
}
// the reference implementation converts the whole thing to network order (big endian) (in 32-bit words)
// so we need to make sure to do the same. Source:
// https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115
// after this, is the salt
let mut salt = vec![];
for _ in 0..salt_len / 4 {
salt.extend_from_slice(&buf.get_u32().to_be_bytes()[..]);
}
// then key[s]
let mut wrapped_keys = vec![];
for _ in 0..(key_len * key_flags.bits().count_ones() as usize + 8) / 4 {
wrapped_keys.extend_from_slice(&buf.get_u32().to_be_bytes()[..]);
}
Ok(KeyingMaterialMessage {
pt,
key_flags,
keki,
cipher,
auth,
salt,
wrapped_keys,
})
}
fn serialize<T: BufMut>(&self, into: &mut T) {
// first 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// |0|Vers | PT | Sign | resv |KF |
// version is 1
into.put_u8(1 << 4 | self.pt as u8);
into.put_u16(Self::SIGN);
// rightmost bit of KF is even, other is odd
into.put_u8(self.key_flags.bits());
// second 32-bit word: keki
into.put_u32(self.keki);
// third 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Cipher | Auth | SE | Resv1 |
into.put_u8(self.cipher as u8);
into.put_u8(self.auth as u8);
into.put_u8(StreamEncapsulation::Srt as u8);
into.put_u8(0); // resv1
// fourth 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Resv2 | Slen/4 | Klen/4 |
into.put_u16(0); // resv2
into.put_u8((self.salt.len() / 4) as u8);
// derive the per-key length: strip the 8 wrap bytes, then divide by the number of keys (one per set bit in key_flags)
let key_len = (self.wrapped_keys.len() - 8) / self.key_flags.bits().count_ones() as usize;
into.put_u8((key_len / 4) as u8);
// put the salt then key[s]
into.put(&self.salt[..]);
// the reference implementation converts the whole thing to network order (big endian) (in 32-bit words)
// so we need to make sure to do the same. Source:
// https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115
for num in self.wrapped_keys[..].chunks(4) {
into.put_u32(u32::from_be_bytes([num[0], num[1], num[2], num[3]]));
}
}
}
impl fmt::Debug for SrtControlPacket {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SrtControlPacket::Reject => write!(f, "reject"),
SrtControlPacket::HandshakeRequest(req) => write!(f, "hsreq={req:?}"),
SrtControlPacket::HandshakeResponse(resp) => write!(f, "hsresp={resp:?}"),
SrtControlPacket::KeyRefreshRequest(req) => write!(f, "kmreq={req:?}"),
SrtControlPacket::KeyRefreshResponse(resp) => write!(f, "kmresp={resp:?}"),
SrtControlPacket::StreamId(sid) => write!(f, "streamid={sid}"),
SrtControlPacket::Congestion(ctype) => write!(f, "congestion={ctype}"),
SrtControlPacket::Filter(filter) => write!(f, "filter={filter:?}"),
SrtControlPacket::Group { ty, flags, weight } => {
write!(f, "group=({ty:?}, {flags:?}, {weight:?})")
}
}
}
}
impl TryFrom<u8> for CipherType {
type Error = PacketParseError;
fn try_from(from: u8) -> Result<CipherType, PacketParseError> {
match from {
0 => Ok(CipherType::None),
1 => Ok(CipherType::Ecb),
2 => Ok(CipherType::Ctr),
3 => Ok(CipherType::Cbc),
e => Err(PacketParseError::BadCipherKind(e)),
}
}
}
#[cfg(test)]
mod tests {
use super::{KeyingMaterialMessage, SrtControlPacket, SrtHandshake, SrtShakeFlags};
use crate::{options::*, packet::*};
use std::{io::Cursor, time::Duration};
#[test]
fn deser_ser_shake() {
let handshake = Packet::Control(ControlPacket {
timestamp: TimeStamp::from_micros(123_141),
dest_sockid: SocketId(123),
control_type: ControlTypes::Srt(SrtControlPacket::HandshakeRequest(SrtHandshake {
version: SrtVersion::CURRENT,
flags: SrtShakeFlags::empty(),
send_latency: Duration::from_millis(4000),
recv_latency: Duration::from_millis(3000),
})),
});
let mut buf = Vec::new();
handshake.serialize(&mut buf);
let deserialized = Packet::parse(&mut Cursor::new(buf), false).unwrap();
assert_eq!(handshake, deserialized);
}
#[test]
fn ser_deser_sid() {
let sid = Packet::Control(ControlPacket {
timestamp: TimeStamp::from_micros(123),
dest_sockid: SocketId(1234),
control_type: ControlTypes::Srt(SrtControlPacket::StreamId("Hellohelloheloo".into())),
});
let mut buf = Vec::new();
sid.serialize(&mut buf);
let deser = Packet::parse(&mut Cursor::new(buf), false).unwrap();
assert_eq!(sid, deser);
}
#[test]
fn srt_key_message_debug() |
}
| {
let salt = b"\x00\x00\x00\x00\x00\x00\x00\x00\x85\x2c\x3c\xcd\x02\x65\x1a\x22";
let wrapped = b"U\x06\xe9\xfd\xdfd\xf1'nr\xf4\xe9f\x81#(\xb7\xb5D\x19{\x9b\xcdx";
let km = KeyingMaterialMessage {
pt: PacketType::KeyingMaterial,
key_flags: KeyFlags::EVEN,
keki: 0,
cipher: CipherType::Ctr,
auth: Auth::None,
salt: salt[..].into(),
wrapped_keys: wrapped[..].into(),
};
assert_eq!(format!("{km:?}"), "KeyingMaterialMessage { pt: KeyingMaterial, key_flags: KeyFlags(EVEN), keki: 0, cipher: Ctr, auth: None }")
} | identifier_body |
srt.rs | use std::{
collections::BTreeMap,
convert::TryFrom,
fmt::{self, Display, Formatter},
time::Duration,
};
use bitflags::bitflags;
use bytes::{Buf, BufMut};
use log::warn;
use crate::{options::SrtVersion, packet::PacketParseError};
/// The SRT-specific control packets
/// These are `Packet::Custom` types
#[derive(Clone, Eq, PartialEq)]
pub enum SrtControlPacket {
/// SRT handshake reject
/// ID = 0
Reject,
/// SRT handshake request
/// ID = 1
HandshakeRequest(SrtHandshake),
/// SRT handshake response
/// ID = 2
HandshakeResponse(SrtHandshake),
/// Key manager request
/// ID = 3
KeyRefreshRequest(KeyingMaterialMessage),
/// Key manager response
/// ID = 4
KeyRefreshResponse(KeyingMaterialMessage),
/// Stream identifier
/// ID = 5
StreamId(String),
/// Congestion control type. Often "live" or "file"
/// ID = 6
Congestion(String),
/// ID = 7
/// Filter seems to be a string of
/// comma-separated key-value pairs like:
/// a:b,c:d
Filter(FilterSpec),
// ID = 8
Group {
ty: GroupType,
flags: GroupFlags,
weight: u16,
},
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FilterSpec(pub BTreeMap<String, String>);
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum GroupType {
Undefined,
Broadcast,
MainBackup,
Balancing,
Multicast,
Unrecognized(u8),
}
bitflags! {
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct GroupFlags: u8 {
const MSG_SYNC = 1 << 6;
}
}
/// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L76-L96
/// or https://datatracker.ietf.org/doc/html/draft-sharabayko-srt-00#section-3.2.2
///
/// HaiCrypt KMmsg (Keying Material Message):
///
/// ```ignore,
/// 0 1 2 3
/// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x00 |0|Vers | PT | Sign | resv |KF |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x04 | KEKI |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x08 | Cipher | Auth | SE | Resv1 |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x0C | Resv2 | Slen/4 | Klen/4 |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// +0x10 | Salt |
/// | ... |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// | Wrap |
/// | ... |
/// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
/// ```
///
#[derive(Clone, Eq, PartialEq)]
pub struct KeyingMaterialMessage {
pub pt: PacketType, // TODO: I think this is always KeyingMaterial...
pub key_flags: KeyFlags,
pub keki: u32,
pub cipher: CipherType,
pub auth: Auth,
pub salt: Vec<u8>,
pub wrapped_keys: Vec<u8>,
}
impl From<GroupType> for u8 {
fn from(from: GroupType) -> u8 {
match from {
GroupType::Undefined => 0,
GroupType::Broadcast => 1,
GroupType::MainBackup => 2,
GroupType::Balancing => 3,
GroupType::Multicast => 4,
GroupType::Unrecognized(u) => u,
}
}
}
impl From<u8> for GroupType {
fn from(from: u8) -> GroupType {
match from {
0 => GroupType::Undefined,
1 => GroupType::Broadcast,
2 => GroupType::MainBackup,
3 => GroupType::Balancing,
4 => GroupType::Multicast,
u => GroupType::Unrecognized(u),
}
}
}
impl fmt::Debug for KeyingMaterialMessage {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyingMaterialMessage")
.field("pt", &self.pt)
.field("key_flags", &self.key_flags)
.field("keki", &self.keki)
.field("cipher", &self.cipher)
.field("auth", &self.auth)
.finish()
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Auth {
None = 0,
}
impl TryFrom<u8> for Auth {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0 => Ok(Auth::None),
e => Err(PacketParseError::BadAuth(e)),
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum StreamEncapsulation {
Udp = 1,
Srt = 2,
}
impl TryFrom<u8> for StreamEncapsulation {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
Ok(match value {
1 => StreamEncapsulation::Udp,
2 => StreamEncapsulation::Srt,
e => return Err(PacketParseError::BadStreamEncapsulation(e)),
})
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
// see hcrypt_msg.h:43...
// 7: Reserved to discriminate MPEG-TS packet (0x47=sync byte).
pub enum PacketType {
MediaStream = 1, // Media Stream Message (MSmsg)
KeyingMaterial = 2, // Keying Material Message (KMmsg)
}
bitflags! {
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct KeyFlags : u8 {
const EVEN = 0b01;
const ODD = 0b10;
}
}
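// Illustrative note (not in the original source): KeyingMaterialMessage::parse
// sizes the wrapped-key block by counting set bits here, so
// KeyFlags::EVEN.bits().count_ones() == 1 (one key) and
// (KeyFlags::EVEN | KeyFlags::ODD).bits().count_ones() == 2 (both keys);
// the expected payload is then salt_len + key_len * key_count + 8 wrap bytes.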
impl TryFrom<u8> for PacketType {
type Error = PacketParseError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
1 => Ok(PacketType::MediaStream),
2 => Ok(PacketType::KeyingMaterial),
err => Err(PacketParseError::BadKeyPacketType(err)),
} |
/// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L121-L124
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum CipherType {
None = 0,
Ecb = 1,
Ctr = 2,
Cbc = 3,
}
/// The SRT handshake object
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct SrtHandshake {
/// The SRT version
/// Serialized just as the u32 that SrtVersion serialized to
pub version: SrtVersion,
/// SRT connection init flags
pub flags: SrtShakeFlags,
/// The peer's TSBPD latency (latency to send at)
/// This is serialized as the upper 16 bits of the third 32-bit word
/// source: https://github.com/Haivision/srt/blob/4f7f2beb2e1e306111b9b11402049a90cb6d3787/srtcore/core.cpp#L1341-L1353
pub send_latency: Duration,
/// The TSBPD latency (latency to recv at)
/// This is serialized as the lower 16 bits of the third 32-bit word
/// see csrtcc.cpp:132 in the reference implementation
pub recv_latency: Duration,
}
bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct SrtShakeFlags: u32 {
/// Timestamp-based Packet delivery real-time data sender
const TSBPDSND = 0x1;
/// Timestamp-based Packet delivery real-time data receiver
const TSBPDRCV = 0x2;
/// HaiCrypt AES-128/192/256-CTR
/// also represents if it supports the encryption flags in the data packet
const HAICRYPT = 0x4;
/// Drop real-time data packets too late to be processed in time
const TLPKTDROP = 0x8;
/// Periodic NAK report
const NAKREPORT = 0x10;
/// One bit in payload packet msgno is "retransmitted" flag
const REXMITFLG = 0x20;
/// This entity supports stream ID packets
const STREAM = 0x40;
/// This entity supports the packet filter extension (TODO: confirm exact semantics)
const PACKET_FILTER = 0x80;
// currently implemented flags
const SUPPORTED = Self::TSBPDSND.bits() | Self::TSBPDRCV.bits() | Self::HAICRYPT.bits() | Self::REXMITFLG.bits();
}
}
fn le_bytes_to_string(le_bytes: &mut impl Buf) -> Result<String, PacketParseError> {
// must contain a non-empty whole number of 32-bit words, or the reads below would panic
if le_bytes.remaining() % 4 != 0 || le_bytes.remaining() == 0 {
return Err(PacketParseError::NotEnoughData);
}
let mut str_bytes = Vec::with_capacity(le_bytes.remaining());
while le_bytes.remaining() > 4 {
str_bytes.extend(le_bytes.get_u32_le().to_be_bytes());
}
// make sure to skip padding bytes if any for the last word
match le_bytes.get_u32_le().to_be_bytes() {
[a, 0, 0, 0] => str_bytes.push(a),
[a, b, 0, 0] => str_bytes.extend([a, b]),
[a, b, c, 0] => str_bytes.extend([a, b, c]),
[a, b, c, d] => str_bytes.extend([a, b, c, d]),
}
String::from_utf8(str_bytes).map_err(|e| PacketParseError::StreamTypeNotUtf8(e.utf8_error()))
}
fn string_to_le_bytes(str: &str, into: &mut impl BufMut) {
let mut chunks = str.as_bytes().chunks_exact(4);
while let Some(&[a, b, c, d]) = chunks.next() {
into.put(&[d, c, b, a][..]);
}
// add padding bytes for the final word if needed
match *chunks.remainder() {
[a, b, c] => into.put(&[0, c, b, a][..]),
[a, b] => into.put(&[0, 0, b, a][..]),
[a] => into.put(&[0, 0, 0, a][..]),
[] => {} // exact multiple of 4
_ => unreachable!(),
}
}
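// Hedged round-trip sketch for the two helpers above (bytes hand-computed):
// string_to_le_bytes("srt", ...) takes the 3-byte remainder arm and emits
// [0x00, b't', b'r', b's'] = [0x00, 0x74, 0x72, 0x73]; reading that back,
// get_u32_le() yields 0x7372_7400, whose to_be_bytes() are [b's', b'r', b't', 0],
// so the [a, b, c, 0] arm in le_bytes_to_string recovers "srt".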
impl Display for FilterSpec {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
for (i, (k, v)) in self.0.iter().enumerate() {
write!(f, "{k}:{v}")?;
if i != self.0.len() - 1 {
write!(f, ",")?;
}
}
Ok(())
}
}
impl SrtControlPacket {
pub fn parse<T: Buf>(
packet_type: u16,
buf: &mut T,
) -> Result<SrtControlPacket, PacketParseError> {
use self::SrtControlPacket::*;
match packet_type {
0 => Ok(Reject),
1 => Ok(HandshakeRequest(SrtHandshake::parse(buf)?)),
2 => Ok(HandshakeResponse(SrtHandshake::parse(buf)?)),
3 => Ok(KeyRefreshRequest(KeyingMaterialMessage::parse(buf)?)),
4 => Ok(KeyRefreshResponse(KeyingMaterialMessage::parse(buf)?)),
5 => {
// the stream id string is stored as 32-bit little endian words
// https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3
le_bytes_to_string(buf).map(StreamId)
}
6 => le_bytes_to_string(buf).map(Congestion),
// Filter
7 => {
let filter_str = le_bytes_to_string(buf)?;
Ok(Filter(FilterSpec(
filter_str
.split(',')
.map(|kv| {
let mut colon_split_iter = kv.split(':');
let k = colon_split_iter
.next()
.ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?;
let v = colon_split_iter
.next()
.ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?;
// only one colon
if colon_split_iter.next().is_some() {
return Err(PacketParseError::BadFilter(filter_str.clone()));
}
Ok((k.to_string(), v.to_string()))
})
.collect::<Result<_, _>>()?,
)))
}
8 => {
let ty = buf.get_u8().into();
let flags = GroupFlags::from_bits_truncate(buf.get_u8());
let weight = buf.get_u16_le();
Ok(Group { ty, flags, weight })
}
_ => Err(PacketParseError::UnsupportedSrtExtensionType(packet_type)),
}
}
/// Get the value to fill the reserved area with
pub fn type_id(&self) -> u16 {
use self::SrtControlPacket::*;
match self {
Reject => 0,
HandshakeRequest(_) => 1,
HandshakeResponse(_) => 2,
KeyRefreshRequest(_) => 3,
KeyRefreshResponse(_) => 4,
StreamId(_) => 5,
Congestion(_) => 6,
Filter(_) => 7,
Group { .. } => 8,
}
}
pub fn serialize<T: BufMut>(&self, into: &mut T) {
use self::SrtControlPacket::*;
match self {
HandshakeRequest(s) | HandshakeResponse(s) => {
s.serialize(into);
}
KeyRefreshRequest(k) | KeyRefreshResponse(k) => {
k.serialize(into);
}
Filter(filter) => {
string_to_le_bytes(&format!("{filter}"), into);
}
Group { ty, flags, weight } => {
into.put_u8((*ty).into());
into.put_u8(flags.bits());
into.put_u16_le(*weight);
}
Reject => {}
StreamId(str) | Congestion(str) => {
// the stream id string and congestion string is stored as 32-bit little endian words
// https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3
string_to_le_bytes(str, into);
}
}
}
// size in 32-bit words
pub fn size_words(&self) -> u16 {
use self::SrtControlPacket::*;
match self {
// 3 32-bit words, version, flags, latency
HandshakeRequest(_) | HandshakeResponse(_) => 3,
// 4 32-bit words + salt + key + wrap [2]
KeyRefreshRequest(ref k) | KeyRefreshResponse(ref k) => {
4 + k.salt.len() as u16 / 4 + k.wrapped_keys.len() as u16 / 4
}
Congestion(str) | StreamId(str) => ((str.len() + 3) / 4) as u16, // ceil(len / 4): round the byte length up to whole 32-bit words
// 1 32-bit word packed with type, flags, and weight
Group { .. } => 1,
Filter(filter) => ((format!("{filter}").len() + 3) / 4) as u16, // TODO: not optimal performance, but probably okay
_ => unimplemented!("{:?}", self),
}
}
}
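// Example of the rounding above (hand-computed): the 15-byte stream id
// "Hellohelloheloo" from the tests needs (15 + 3) / 4 = 4 words, matching
// the zero padding string_to_le_bytes adds to the final word.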
impl SrtHandshake {
pub fn parse<T: Buf>(buf: &mut T) -> Result<SrtHandshake, PacketParseError> {
if buf.remaining() < 12 {
return Err(PacketParseError::NotEnoughData);
}
let version = SrtVersion::parse(buf.get_u32());
let shake_flags = buf.get_u32();
let flags = match SrtShakeFlags::from_bits(shake_flags) {
Some(i) => i,
None => {
warn!("Unrecognized SRT flags: 0b{:b}", shake_flags);
SrtShakeFlags::from_bits_truncate(shake_flags)
}
};
let peer_latency = buf.get_u16();
let latency = buf.get_u16();
Ok(SrtHandshake {
version,
flags,
send_latency: Duration::from_millis(u64::from(peer_latency)),
recv_latency: Duration::from_millis(u64::from(latency)),
})
}
pub fn serialize<T: BufMut>(&self, into: &mut T) {
into.put_u32(self.version.to_u32());
into.put_u32(self.flags.bits());
// upper 16 bits are peer latency
into.put_u16(self.send_latency.as_millis() as u16); // TODO: handle overflow
// lower 16 is latency
into.put_u16(self.recv_latency.as_millis() as u16); // TODO: handle overflow
}
}
impl KeyingMaterialMessage {
// from hcrypt_msg.h:39
// also const traits aren't a thing yet, so u16::from can't be used
const SIGN: u16 =
((b'H' - b'@') as u16) << 10 | ((b'A' - b'@') as u16) << 5 | (b'I' - b'@') as u16;
pub fn parse(buf: &mut impl Buf) -> Result<KeyingMaterialMessage, PacketParseError> {
// first 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// |0|Vers | PT | Sign | resv |KF |
// make sure there is enough data left in the buffer to at least get to the key flags and length, which tells us how long the packet will be
// that's 4x32bit words
if buf.remaining() < 4 * 4 {
return Err(PacketParseError::NotEnoughData);
}
let vers_pt = buf.get_u8();
// make sure the first bit is zero
if (vers_pt & 0b1000_0000) != 0 {
return Err(PacketParseError::BadSrtExtensionMessage);
}
// upper 4 bits are version
let version = vers_pt >> 4;
if version != 1 {
return Err(PacketParseError::BadSrtExtensionMessage);
}
// lower 4 bits are pt
let pt = PacketType::try_from(vers_pt & 0b0000_1111)?;
// next 16 bits are the sign
let sign = buf.get_u16();
if sign != Self::SIGN {
return Err(PacketParseError::BadKeySign(sign));
}
// next 6 bits are reserved, then two bits of KF
let key_flags = KeyFlags::from_bits_truncate(buf.get_u8() & 0b0000_0011);
// second 32-bit word: keki
let keki = buf.get_u32();
// third 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Cipher | Auth | SE | Resv1 |
let cipher = CipherType::try_from(buf.get_u8())?;
let auth = Auth::try_from(buf.get_u8())?;
let se = StreamEncapsulation::try_from(buf.get_u8())?;
if se != StreamEncapsulation::Srt {
return Err(PacketParseError::StreamEncapsulationNotSrt);
}
let _resv1 = buf.get_u8();
// fourth 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Resv2 | Slen/4 | Klen/4 |
let _resv2 = buf.get_u16();
let salt_len = usize::from(buf.get_u8()) * 4;
let key_len = usize::from(buf.get_u8()) * 4;
// acceptable key lengths are 16, 24, and 32
match key_len {
// OK
16 | 24 | 32 => {}
// not
e => return Err(PacketParseError::BadCryptoLength(e as u32)),
}
// get the size of the packet to make sure that there is enough space
// salt + one key per set bit in key_flags (already ANDed with 0b11, so at most 2 keys), plus 8 bytes of wrap data
if buf.remaining() < salt_len + key_len * (key_flags.bits().count_ones() as usize) + 8 {
return Err(PacketParseError::NotEnoughData);
}
// the reference implementation converts the whole thing to network order (big endian) (in 32-bit words)
// so we need to make sure to do the same. Source:
// https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115
// after this, is the salt
let mut salt = vec![];
for _ in 0..salt_len / 4 {
salt.extend_from_slice(&buf.get_u32().to_be_bytes()[..]);
}
// then key[s]
let mut wrapped_keys = vec![];
for _ in 0..(key_len * key_flags.bits().count_ones() as usize + 8) / 4 {
wrapped_keys.extend_from_slice(&buf.get_u32().to_be_bytes()[..]);
}
Ok(KeyingMaterialMessage {
pt,
key_flags,
keki,
cipher,
auth,
salt,
wrapped_keys,
})
}
fn serialize<T: BufMut>(&self, into: &mut T) {
// first 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// |0|Vers | PT | Sign | resv |KF |
// version is 1
into.put_u8(1 << 4 | self.pt as u8);
into.put_u16(Self::SIGN);
// rightmost bit of KF is even, other is odd
into.put_u8(self.key_flags.bits());
// second 32-bit word: keki
into.put_u32(self.keki);
// third 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Cipher | Auth | SE | Resv1 |
into.put_u8(self.cipher as u8);
into.put_u8(self.auth as u8);
into.put_u8(StreamEncapsulation::Srt as u8);
into.put_u8(0); // resv1
// fourth 32-bit word:
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
// | Resv2 | Slen/4 | Klen/4 |
into.put_u16(0); // resv2
into.put_u8((self.salt.len() / 4) as u8);
// derive the per-key length: strip the 8 wrap bytes, then divide by the number of keys (one per set bit in key_flags)
let key_len = (self.wrapped_keys.len() - 8) / self.key_flags.bits().count_ones() as usize;
into.put_u8((key_len / 4) as u8);
// put the salt then key[s]
into.put(&self.salt[..]);
// the reference implementation converts the whole thing to network order (big endian) (in 32-bit words)
// so we need to make sure to do the same. Source:
// https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115
for num in self.wrapped_keys[..].chunks(4) {
into.put_u32(u32::from_be_bytes([num[0], num[1], num[2], num[3]]));
}
}
}
impl fmt::Debug for SrtControlPacket {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SrtControlPacket::Reject => write!(f, "reject"),
SrtControlPacket::HandshakeRequest(req) => write!(f, "hsreq={req:?}"),
SrtControlPacket::HandshakeResponse(resp) => write!(f, "hsresp={resp:?}"),
SrtControlPacket::KeyRefreshRequest(req) => write!(f, "kmreq={req:?}"),
SrtControlPacket::KeyRefreshResponse(resp) => write!(f, "kmresp={resp:?}"),
SrtControlPacket::StreamId(sid) => write!(f, "streamid={sid}"),
SrtControlPacket::Congestion(ctype) => write!(f, "congestion={ctype}"),
SrtControlPacket::Filter(filter) => write!(f, "filter={filter:?}"),
SrtControlPacket::Group { ty, flags, weight } => {
write!(f, "group=({ty:?}, {flags:?}, {weight:?})")
}
}
}
}
impl TryFrom<u8> for CipherType {
type Error = PacketParseError;
fn try_from(from: u8) -> Result<CipherType, PacketParseError> {
match from {
0 => Ok(CipherType::None),
1 => Ok(CipherType::Ecb),
2 => Ok(CipherType::Ctr),
3 => Ok(CipherType::Cbc),
e => Err(PacketParseError::BadCipherKind(e)),
}
}
}
#[cfg(test)]
mod tests {
use super::{KeyingMaterialMessage, SrtControlPacket, SrtHandshake, SrtShakeFlags};
use crate::{options::*, packet::*};
use std::{io::Cursor, time::Duration};
#[test]
fn deser_ser_shake() {
let handshake = Packet::Control(ControlPacket {
timestamp: TimeStamp::from_micros(123_141),
dest_sockid: SocketId(123),
control_type: ControlTypes::Srt(SrtControlPacket::HandshakeRequest(SrtHandshake {
version: SrtVersion::CURRENT,
flags: SrtShakeFlags::empty(),
send_latency: Duration::from_millis(4000),
recv_latency: Duration::from_millis(3000),
})),
});
let mut buf = Vec::new();
handshake.serialize(&mut buf);
let deserialized = Packet::parse(&mut Cursor::new(buf), false).unwrap();
assert_eq!(handshake, deserialized);
}
#[test]
fn ser_deser_sid() {
let sid = Packet::Control(ControlPacket {
timestamp: TimeStamp::from_micros(123),
dest_sockid: SocketId(1234),
control_type: ControlTypes::Srt(SrtControlPacket::StreamId("Hellohelloheloo".into())),
});
let mut buf = Vec::new();
sid.serialize(&mut buf);
let deser = Packet::parse(&mut Cursor::new(buf), false).unwrap();
assert_eq!(sid, deser);
}
#[test]
fn srt_key_message_debug() {
let salt = b"\x00\x00\x00\x00\x00\x00\x00\x00\x85\x2c\x3c\xcd\x02\x65\x1a\x22";
let wrapped = b"U\x06\xe9\xfd\xdfd\xf1'nr\xf4\xe9f\x81#(\xb7\xb5D\x19{\x9b\xcdx";
let km = KeyingMaterialMessage {
pt: PacketType::KeyingMaterial,
key_flags: KeyFlags::EVEN,
keki: 0,
cipher: CipherType::Ctr,
auth: Auth::None,
salt: salt[..].into(),
wrapped_keys: wrapped[..].into(),
};
assert_eq!(format!("{km:?}"), "KeyingMaterialMessage { pt: KeyingMaterial, key_flags: KeyFlags(EVEN), keki: 0, cipher: Ctr, auth: None }")
}
} | }
} | random_line_split |
play_blackjack.py | # -*- coding: utf-8 -*-
"""
Black Jack
Version: Alpha 0.1
Created on Wed Jan 2 23:41:46 2019
@author: Jason Bubenicek
"""
import random
from IPython.display import clear_output
import os
from colorama import init
init()
from colorama import Fore
PLAYERS = []
HOUSE = []
def cls():
# https://stackoverflow.com/questions/517970/how-to-clear-the-interpreter-console
os.system('cls' if os.name=='nt' else 'clear')
class Card():
'''
Usage:
Create instances of each card in the deck.
As you hand them out, make sure to appropriately set
the .visible attribute to True or False based on
whether or not it should be shown to all players, such as
the Dealer's 2nd Card, or
'''
# Use the 'visible' attribute to determine
# whether or not to show this card to the
# players.
hidden = False
def __init__(self,suit,name,value, color, suit_no, name_short):
self.suit = suit
self.name = name
self.value = value
self.color = color
self.suit_no = suit_no
self.name_short = name_short
self.suit_icon = '♥♦♣♠'[suit_no]
if self.color == "Red":
self.suit_icon = Fore.RED + self.suit_icon + Fore.WHITE
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {self.name_short:<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {self.suit_icon} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {self.name_short:>2} |'
self.card_line7 = f'└───────┘'
def set_hidden(self):
self.hidden = True
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {"?":<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {"?"} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {"?":>2} |'
self.card_line7 = f'└───────┘'
def set_visible(self):
self.hidden = False
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {self.name_short:<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {self.suit_icon} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {self.name_short:>2} |'
self.card_line7 = f'└───────┘'
def show_card(self):
print(self.card_line1)
print(self.card_line2)
print(self.card_line3)
print(self.card_line4)
print(self.card_line5)
print(self.card_line6)
print(self.card_line7)
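# Illustrative rendering (hypothetical call; exact padding follows the f-strings above):
# Card(("Spades", "Black", 3), "Ace", 11, "Black", 3, "A").show_card()
# ┌───────┐
# | A     |
# |       |
# |   ♠   |
# |       |
# |     A |
# └───────┘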
class Deck():
# Create a blank list to hold the cards that are
# added to this Deck
# cards = []
# These lists will be used to populate the Deck
suits = [("Hearts","Red",0),("Diamonds","Red",1),("Clubs","Black",2),("Spades","Black",3)]
names = [("Two",2,"2"),("Three",3,"3"),("Four",4,"4"),("Five",5,"5"),("Six",6,"6"),("Seven",7,"7"),("Eight",8,"8"),("Nine",9,"9"),("Ten",10,"10"),("Jack",10,"J"),("Queen",10,"Q"),("King",10,"K"),("Ace",11,"A")]
def __init__(self):
self.cards = []
# When the Deck class is instantiated, populate it with a fresh
# deck of cards
for suit in self.suits:
for name in self.names:
display_suit, color, suit_no = suit
display_name, value, name_short = name
card = Card(suit, display_name, value, color,suit_no, name_short)
self.cards.append(card)
# Shuffle the deck to randomize the next card.
random.shuffle(self.cards)
def hit(self,player):
# Select the next card in the deck.
# card = self.cards.pop()
card = self.cards[0]
card.set_visible()
player.add_card(card)
# Once the card has been given out, we need to make sure to
# remove it from the Deck, so that no one else gets the same
# card.
del self.cards[0]
def deal(self,at_table):
'''
Usage:
At the beginning of the game, deal the cards to each player.
'''
for card_number in [1,2]:
for player in at_table:
card = self.cards[0]
if player.player == "Dealer" and card_number == 2:
# The Dealer shows their 2nd card to all players. Let's
# make it visible.
card.set_hidden()
player.add_card(card)
# Once the card has been given out, we need to make sure to
# remove it from the Deck, so that no one else gets the same
# card.
del self.cards[0]
def show_deck(self):
for card in self.cards:
print(card.show_card())
# Use the print() command to get a message that contains the
# number of cards remaining in the deck. This is good to ensure
# that you are removing cards from the Deck as you hand them out
# to players.
def __str__(self):
return f"There are {len(self.cards)} in the deck"
# This will provide you a numeric count of cards in the deck. You
# should use this to ensure that the deck has been populated with
# the standard 52 and as you hand them out that the number is reducing
# appropriately.
def __len__(self):
return len(self.cards)
class Hand():
'''
Usage:
You should create an instance of Hand for each player in the game. It will
contain a list of the cards that the player has in their hand. Initially, the
.cards attribute will have 0 cards. You can use the .add_card() method to
pass in a card.
'''
cards = []
busted = False
amount_bet = 0
def __init__(self, player, amount_bet):
self.cards = []
self.player = player
self.amount_bet = amount_bet
# print("Hand created.")
def add_card(self, card):
'''
Pass in a Card object and don't forget to remove it from the deck afterward.
'''
self.cards.append(card)
# print(f"Card added, {self.player} now has {len(self.cards)} in their hand.")
def hand_total(self):
'''
Usage:
Call .hand_total to get a numeric representation of the current Hand value.
'''
aces = 0
hand_value = 0
for card in self.cards:
if not card.hidden:
hand_value += card.value
if card.name == "Ace":
aces += 1
if hand_value <= 21:
# print(f"NOT BUST: Hand value is: {hand_value}")
return hand_value
elif hand_value > 21 and aces == 0:
# print(f"BUST: This hand has a value of {hand_value}, which is over 21 and there are {aces} Aces.")
return hand_value
else:
# print(f"This hand has a value of {hand_value}, but has {aces} Ace(s), will attempt to convert to 1")
# while aces > 0:
while aces:
# This hand has at least 1 Ace, so we will attempt to convert it to a 1 and then
| if this Hand is in BUST state or not.
'''
if self.hand_total() > 21:
self.busted = True
return True
else:
self.busted = False
return False
def show_hand(self):
for card in self.cards:
print(card.card_line1, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line2, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line3, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line4, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line5, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line6, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line7, sep=' ', end='', flush=True)
print(f"\n{self.player} Total: {self.hand_total()}")
def __str__(self):
hand_value = 0
hand_display = []
print(f"{self.player} has the following cards:")
print("===============================================")
for card in self.cards:
hand_value += card.value
# print(f"{card.name} of {card.suit} with a value of {str(card.value)} and a visibility of {str(card.visible)}")
hand_display.append(f"{card.name} {card.suit[0]}")
return str(f"{self.player}: {self.hand_total()}")
def __len__(self):
'''
Usage:
Use the len() function to return the number of cards in the Hand
'''
return len(self.cards)
def check_for_naturals(dealer, player):
'''
Usage:
Use this function after the hands have been dealt by calling
it with each player and the dealer hand. It will return one
of four values: "Both", "Dealer", "Player", or "None"
'''
result = None
if dealer.hand_total() == 21 and player.hand_total() == 21:
result = "Both"
elif dealer.hand_total() == 21:
result = "Dealer"
elif player.hand_total() == 21:
result = "Player"
else:
result = "None"
return result
class Player():
win_loss_amount = 0
current_hand = None
previous_hands = []
def __init__(self, name):
self.name = name
# track hands per instance so players don't share one history
self.previous_hands = []
def set_current_hand(self, hand):
self.current_hand = hand
def set_previous_hand(self, hand):
self.previous_hands.append(hand)
def get_winnings(self):
result = 0
for hand in self.previous_hands:
result += hand.amount_won
return result
def __len__(self):
return len(self.previous_hands)
def __str__(self):
return f"{self.name} has played {len(self.previous_hands)} and won/loss: ${self.get_winnings()}."
def dealer_play(dealer, d):
# Make sure that all of the dealer cards are now visible and
# able to be counted.
for card in dealer.cards:
card.set_visible()
# Have the dealer keep hitting until their hand reaches at least 17
# (the standard dealer rule); a bust is reported below
while dealer.hand_total() < 17:
# print(f"{dealer.player} has to HIT with Hand of {dealer.hand_total()}.")
# hit(dealer, d)
d.hit(dealer)
clear_output()
cls()
dealer.show_hand()
if dealer.check_bust():
print(f"{dealer.player} busted!")
# x = input("Ready to proceed")
def player_play(player, dealer, d):
clear_output()
cls()
dealer.show_hand()
player.show_hand()
hit_continue = True
while player.hand_total() < 21 and hit_continue:
response = ""
while response.lower() not in ['y','yes', 'n', 'no']:
response = input(f"{player.player}: Would you like a card? Enter 'Yes', 'No'").lower()
if response[0] == 'y':
print(f"{player.player} wants card")
# hit(player, d)
d.hit(player)
clear_output()
cls()
dealer.show_hand()
player.show_hand()
if player.check_bust():
print("Sorry, you busted")
hit_continue = False
else:
print(f"{player.player} wants to 'stay'")
hit_continue = False
def get_bets(player_hands):
for player_hand in player_hands:
bet = 0
while bet not in range(20, 101):  # inclusive of $100, matching the prompt
try:
bet = int(input(f"{player_hand.player}, how much would you like to bet? ($20 to $100)?"))
except:
print("You must enter a numeric value between 20 and 100.")
bet = 0
player_hand.amount_bet = bet
def play():
# Get the Deck populated and shuffled
d = Deck()
if input("Would you like to review the deck, first (y/n)?") == "y":
d.show_deck()
input("Press any key to continue.")
# Generate a list of Hands one for each players
playing_hands = []
for hand_id in range(0,len(PLAYERS)):
playing_hands.append(Hand(f"{PLAYERS[hand_id].name}", 20))
dealer_hand = Hand("Dealer", 0)
# Populate the player_hands and the players (including the Dealer) in the at_table lists
player_hands = []
at_table = []
for playing_hand in playing_hands:
player_hands.append(playing_hand)
at_table.append(playing_hand)
at_table.append(dealer_hand)
# Deal the cards to the players.
# deal(at_table, d)
d.deal(at_table)
# Ask each player to determine how much they are going to bet this round.
get_bets(player_hands)
# Iterate through each of the player, and allow
# them to select as many cards as they'd like
# until they BUST or STAY
# print(f"# of Players playing the Dealer is: {len(players)}")
# a = input("Ready to proceed")
for player_hand in player_hands:
player_play(player_hand,dealer_hand, d)
dealer_play(dealer_hand, d)
# Check to see who won
clear_output()
cls()
dealer_hand.show_hand()
player_index = 0
for player_hand in player_hands:
player_hand.show_hand()
print("\n===============================================")
print(f"Checking {player_hand.player}'s results.")
if player_hand.busted:
print(f"{player_hand.player} BUSTED!")
print(f"Removing ${player_hand.amount_bet} from {PLAYERS[player_index].name}'s Win/Loss balance.")
PLAYERS[player_index].win_loss_amount -= player_hand.amount_bet
HOUSE[0].win_loss_amount += player_hand.amount_bet
elif ((dealer_hand.busted) and (not player_hand.busted)) or ((not player_hand.busted) and player_hand.hand_total() > dealer_hand.hand_total()):
print(f"{player_hand.player} Won!")
print(f"Adding ${player_hand.amount_bet} to {PLAYERS[player_index].name}'s Win/Loss balance.")
PLAYERS[player_index].win_loss_amount += player_hand.amount_bet
HOUSE[0].win_loss_amount -= player_hand.amount_bet
elif (not player_hand.busted) and player_hand.hand_total() == dealer_hand.hand_total():
print(f"Push!")
else:
print("Dealer Won!")
print(f"Removing ${player_hand.amount_bet} from {PLAYERS[player_index].name}'s Win/Loss balance.")
PLAYERS[player_index].win_loss_amount -= player_hand.amount_bet
HOUSE[0].win_loss_amount += player_hand.amount_bet
player_index += 1
print("Game over.")
def start_game():
'''
Usage:
Call start_game() to start the game.
'''
ready = ""
clear_output()
cls()
print("=========================================================================================================")
print("= =")
print("= =")
print("= Black Jack =")
print("= At the famous, Casino Bub =")
print("= =")
print("= =")
print("= =")
print("=========================================================================================================\n\n\n")
# Ask how many players
# Setup player objects
player_player1 = Player("Jason Bubnicek")
player_player2 = Player("Dayna Bubenicek")
PLAYERS.append(player_player1)
PLAYERS.append(player_player2)
HOUSE.append(Player("Dealer"))
# Continue to ask if the player would like to play until
# they indicate that they want to Quit the game.
while ready.lower() not in ["q", "quit", "exit", "end"]:
print(f"House has won: ${HOUSE[0].win_loss_amount}")
for player in PLAYERS:
print(f"{player.name} has won: ${player.win_loss_amount}")
ready = input("Would you like to play? ('Yes', 'No', 'Quit')")
if ready.lower() in ["y", "yes"]:
print("Let's play Black Jack")
# input("Right before play")
play()
ready = ""
print("Thanks for playing!")
if __name__ == "__main__":
start_game()
| # sum the hand again to see if it is 21 or less. This will be repeated
# if there are more Aces in the hand and the sum total remains over 21.
aces -= 1
hand_value -= 10
if hand_value <= 21:
# print(f"NOT BUST: Hand value is: {hand_value}")
return hand_value
elif hand_value > 21 and aces == 0:
# print(f"BUST: This hand after changing the Aces to a value 1, is now {hand_value}, which is over 21 and it has {aces} Aces left.")
return hand_value
else:
# print("Changing another Ace to 1")
continue
def check_bust(self):
'''
Usage:
Call .check_bust() to see | conditional_block |
play_blackjack.py | # -*- coding: utf-8 -*-
"""
Black Jack
Version: Alpha 0.1
Created on Wed Jan 2 23:41:46 2019
@author: Jason Bubenicek
"""
import random
from IPython.display import clear_output
import os
from colorama import init
init()
from colorama import Fore
PLAYERS = []
HOUSE = []
def cls():
# https://stackoverflow.com/questions/517970/how-to-clear-the-interpreter-console
os.system('cls' if os.name=='nt' else 'clear')
class Card():
'''
Usage:
Create instances of each card in the deck.
As you hand them out, make sure to appropriately set
the .visible attribute to True or False based on
whether or not it should be shown to all players, such as
the Dealer's 2nd Card, or
'''
# Use the 'visible' attribute to determine
# whether or not to show this card to the
# players.
hidden = False
def __init__(self,suit,name,value, color, suit_no, name_short):
self.suit = suit
self.name = name
self.value = value
self.color = color
self.suit_no = suit_no
self.name_short = name_short
self.suit_icon = '♥♦♣♠'[suit_no]
if self.color == "Red":
self.suit_icon = Fore.RED + self.suit_icon + Fore.WHITE
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {self.name_short:<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {self.suit_icon} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {self.name_short:>2} |'
self.card_line7 = f'└───────┘'
def set_hidden(self):
self.hidden = True
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {"?":<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {"?"} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {"?":>2} |'
self.card_line7 = f'└───────┘'
def set_visible(self):
self.hidden = False
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {self.name_short:<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {self.suit_icon} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {self.name_short:>2} |'
self.card_line7 = f'└───────┘'
def show_card(self):
print(self.card_line1)
print(self.card_line2)
print(self.card_line3)
print(self.card_line4)
print(self.card_line5)
print(self.card_line6)
print(self.card_line7)
class Deck():
# Create a blank list to hold the cards that are
# added to this Deck
# cards = []
| # These lists will be used to populate the Deck
suits = [("Hearts","Red",0),("Diamonds","Red",1),("Clubs","Black",2),("Spades","Black",3)]
names = [("Two",2,"2"),("Three",3,"3"),("Four",4,"4"),("Five",5,"5"),("Six",6,"6"),("Seven",7,"7"),("Eight",8,"8"),("Nine",9,"9"),("Ten",10,"10"),("Jack",10,"J"),("Queen",10,"Q"),("King",10,"K"),("Ace",11,"A")]
def __init__(self):
self.cards = []
# When the Deck class is instantiated, populate it with a fresh
# deck of cards
for suit in self.suits:
for name in self.names:
display_suit, color, suit_no = suit
display_name, value, name_short = name
card = Card(suit, display_name, value, color,suit_no, name_short)
self.cards.append(card)
# Shuffle the deck to randomize the next card.
random.shuffle(self.cards)
def hit(self,player):
# Select the next card in the deck.
# card = self.cards.pop()
card = self.cards[0]
card.set_visible()
player.add_card(card)
# Once the card has been given out, we need to make sure to
# remove it from the Deck, so that no one else gets the same
# card.
del self.cards[0]
def deal(self,at_table):
'''
Usage:
At the beginning of the game, deal the cards to each player.
'''
for card_number in [1,2]:
for player in at_table:
card = self.cards[0]
if player.player == "Dealer" and card_number == 2:
# The Dealer shows their 2nd card to all players. Let's
# make it visible.
card.set_hidden()
player.add_card(card)
# Once the card has been given out, we need to make sure to
# remove it from the Deck, so that no one else gets the same
# card.
del self.cards[0]
def show_deck(self):
for card in self.cards:
print(card.show_card())
# Use the print() command to get a message that contains the
# number of cards remaining in the deck. This is good to ensure
# that you are removing cards from the Deck as you hand them out
# to players.
def __str__(self):
return f"There are {len(self.cards)} in the deck"
# This will provide you a numeric count of cards in the deck. You
# should use this to ensure that the deck has been populated with
# the standard 52 and as you hand them out that the number is reducing
# appropriately.
def __len__(self):
return len(self.cards)
class Hand():
'''
Usage:
You should create an instance of Hand for each player in the game. It will
contain a list of the cards that the player has in their hand. Initially, the
.cards attribute will have 0 cards. You can use the .add_card() method to
pass in a card.
'''
cards = []
busted = False
amount_bet = 0
def __init__(self, player, amount_bet):
self.cards = []
self.player = player
self.amount_bet = amount_bet
# print("Hand created.")
def add_card(self, card):
'''
Pass in a Card object and don't forget to remove it from the deck afterward.
'''
self.cards.append(card)
# print(f"Card added, {self.player} now has {len(self.cards)} in their hand.")
def hand_total(self):
'''
Usage:
Call .hand_total to get a numeric representation of the current Hand value.
'''
aces = 0
hand_value = 0
for card in self.cards:
if not card.hidden:
hand_value += card.value
if card.name == "Ace":
aces += 1
if hand_value <= 21:
# print(f"NOT BUST: Hand value is: {hand_value}")
return hand_value
elif hand_value > 21 and aces == 0:
# print(f"BUST: This hand has a value of {hand_value}, which is over 21 and there are {aces} Aces.")
return hand_value
else:
# print(f"This hand has a value of {hand_value}, but has {aces} Ace(s), will attempt to convert to 1")
# while aces > 0:
while aces:
# This hand has at least 1 Ace, so we will attempt to convert it to a 1 and then
# sum the hand again to see if it is 21 or less. This will be repeated
# if there are more Aces in the hand and the sum total remains over 21.
aces -= 1
hand_value -= 10
if hand_value <= 21:
# print(f"NOT BUST: Hand value is: {hand_value}")
return hand_value
elif hand_value > 21 and aces == 0:
# print(f"BUST: This hand after changing the Aces to a value 1, is now {hand_value}, which is over 21 and it has {aces} Aces left.")
return hand_value
else:
# print("Changing another Ace to 1")
continue
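# Worked example of the Ace demotion above (illustrative): a hand of
# Ace + Ace + Nine first sums to 11 + 11 + 9 = 31, which is over 21, so one
# Ace is demoted (31 - 10 = 21) and hand_total() returns 21 with the second
# Ace still counted as 11.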
def check_bust(self):
'''
Usage:
Call .check_bust() to see if this Hand is in BUST state or not.
'''
if self.hand_total() > 21:
self.busted = True
return True
else:
self.busted = False
return False
def show_hand(self):
for card in self.cards:
print(card.card_line1, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line2, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line3, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line4, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line5, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line6, sep=' ', end='', flush=True)
print()
for card in self.cards:
print(card.card_line7, sep=' ', end='', flush=True)
print(f"\n{self.player} Total: {self.hand_total()}")
def __str__(self):
hand_value = 0
hand_display = []
print(f"{self.player} has the following cards:")
print("===============================================")
for card in self.cards:
hand_value += card.value
# print(f"{card.name} of {card.suit} with a value of {str(card.value)} and a visibility of {str(card.visible)}")
hand_display.append(f"{card.name} {card.suit[0]}")
return str(f"{self.player}: {self.hand_total()}")
def __len__(self):
'''
Usage:
Use the len() function to return the number of cards in the Hand
'''
return len(self.cards)
def check_for_naturals(dealer, player):
'''
Usage:
Use this function after the hands have been dealt by calling
it with each player and the dealer hand. It will return one
of four values: "Both", "Dealer", "Player", or "None"
'''
result = None
if dealer.hand_total() == 21 and player.hand_total() == 21:
result = "Both"
elif dealer.hand_total() == 21:
result = "Dealer"
elif player.hand_total() == 21:
result = "Player"
else:
result = "None"
return result
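# Hedged usage sketch (this helper is not wired into play() yet): right after
# d.deal(at_table), a caller could do
#     outcome = check_for_naturals(dealer_hand, player_hand)
# and settle bets early when outcome is "Dealer", "Player", or "Both";
# note it returns the string "None", not the None object, when nobody has 21.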
class Player():
win_loss_amount = 0
current_hand = None
previous_hands = []
def __init__(self, name):
self.name = name
# track hands per instance so players don't share one history
self.previous_hands = []
def set_current_hand(self, hand):
self.current_hand = hand
def set_previous_hand(self, hand):
self.previous_hands.append(hand)
def get_winnings(self):
result = 0
for hand in self.previous_hands:
result += hand.amount_won
return result
def __len__(self):
return len(self.previous_hands)
def __str__(self):
return f"{self.name} has played {len(self.previous_hands)} and won/loss: ${self.get_winnings()}."
def dealer_play(dealer, d):
# Make sure that all of the dealer cards are now visible and
# able to be counted.
for card in dealer.cards:
card.set_visible()
# Have the dealer keep hitting until their hand reaches at least 17
# (the standard dealer rule); a bust is reported below
while dealer.hand_total() < 17:
# print(f"{dealer.player} has to HIT with Hand of {dealer.hand_total()}.")
# hit(dealer, d)
d.hit(dealer)
clear_output()
cls()
dealer.show_hand()
if dealer.check_bust():
print(f"{dealer.player} busted!")
# x = input("Ready to proceed")
def player_play(player, dealer, d):
clear_output()
cls()
dealer.show_hand()
player.show_hand()
hit_continue = True
while player.hand_total() < 21 and hit_continue:
response = ""
while response.lower() not in ['y','yes', 'n', 'no']:
response = input(f"{player.player}: Would you like a card? Enter 'Yes', 'No'").lower()
if response[0] == 'y':
print(f"{player.player} wants card")
# hit(player, d)
d.hit(player)
clear_output()
cls()
dealer.show_hand()
player.show_hand()
if player.check_bust():
print("Sorry, you busted")
hit_continue = False
else:
print(f"{player.player} wants to 'stay'")
hit_continue = False
def get_bets(player_hands):
for player_hand in player_hands:
bet = 0
while bet not in range(20, 101):  # inclusive of $100, matching the prompt
try:
bet = int(input(f"{player_hand.player}, how much would you like to bet? ($20 to $100)?"))
except:
print("You must enter a numeric value between 20 and 100.")
bet = 0
player_hand.amount_bet = bet
def play():
# Get the Deck populated and shuffled
d = Deck()
if input("Would you like to review the deck, first (y/n)?") == "y":
d.show_deck()
input("Press any key to continue.")
# Generate a list of Hands one for each players
playing_hands = []
for hand_id in range(0,len(PLAYERS)):
playing_hands.append(Hand(f"{PLAYERS[hand_id].name}", 20))
dealer_hand = Hand("Dealer", 0)
# Populate the player_hands and the players (including the Dealer) in the at_table lists
player_hands = []
at_table = []
for playing_hand in playing_hands:
player_hands.append(playing_hand)
at_table.append(playing_hand)
at_table.append(dealer_hand)
# Deal the cards to the players.
# deal(at_table, d)
d.deal(at_table)
# Ask each player to determine how much they are going to bet this round.
get_bets(player_hands)
# Iterate through each of the player, and allow
# them to select as many cards as they'd like
# until they BUST or STAY
# print(f"# of Players playing the Dealer is: {len(players)}")
# a = input("Ready to proceed")
for player_hand in player_hands:
player_play(player_hand,dealer_hand, d)
dealer_play(dealer_hand, d)
# Check to see who won
clear_output()
cls()
dealer_hand.show_hand()
player_index = 0
for player_hand in player_hands:
player_hand.show_hand()
print("\n===============================================")
print(f"Checking {player_hand.player}'s results.")
if player_hand.busted:
print(f"{player_hand.player} BUSTED!")
print(f"Removing ${player_hand.amount_bet} from {PLAYERS[player_index].name}'s Win/Loss balance.")
PLAYERS[player_index].win_loss_amount -= player_hand.amount_bet
HOUSE[0].win_loss_amount += player_hand.amount_bet
elif ((dealer_hand.busted) and (not player_hand.busted)) or ((not player_hand.busted) and player_hand.hand_total() > dealer_hand.hand_total()):
print(f"{player_hand.player} Won!")
print(f"Adding ${player_hand.amount_bet} to {PLAYERS[player_index].name}'s Win/Loss balance.")
PLAYERS[player_index].win_loss_amount += player_hand.amount_bet
HOUSE[0].win_loss_amount -= player_hand.amount_bet
elif (not player_hand.busted) and player_hand.hand_total() == dealer_hand.hand_total():
print(f"Push!")
else:
print("Dealer Won!")
print(f"Removing ${player_hand.amount_bet} from {PLAYERS[player_index].name}'s Win/Loss balance.")
PLAYERS[player_index].win_loss_amount -= player_hand.amount_bet
HOUSE[0].win_loss_amount += player_hand.amount_bet
player_index += 1
print("Game over.")
def start_game():
'''
Usage:
Call start_game() to start the game.
'''
ready = ""
clear_output()
cls()
print("=========================================================================================================")
print("= =")
print("= =")
print("= Black Jack =")
print("= At the famous, Casino Bub =")
print("= =")
print("= =")
print("= =")
print("=========================================================================================================\n\n\n")
# Ask how many players
# Setup player objects
player_player1 = Player("Jason Bubnicek")
player_player2 = Player("Dayna Bubenicek")
PLAYERS.append(player_player1)
PLAYERS.append(player_player2)
HOUSE.append(Player("Dealer"))
# Continue to ask if the player would like to play until
# they indicate that they want to Quit the game.
while ready.lower() not in ["q", "quit", "exit", "end"]:
print(f"House has won: ${HOUSE[0].win_loss_amount}")
for player in PLAYERS:
print(f"{player.name} has won: ${player.win_loss_amount}")
ready = input("Would you like to play? ('Yes', 'No', 'Quit')")
if ready.lower() in ["y", "yes"]:
print("Let's play Black Jack")
# input("Right before play")
play()
ready = ""
print("Thanks for playing!")
if __name__ == "__main__":
start_game()
| identifier_name | |
play_blackjack.py | # -*- coding: utf-8 -*-
"""
Black Jack
Version: Alpha 0.1
Created on Wed Jan 2 23:41:46 2019
@author: Jason Bubenicek
"""
import random
from IPython.display import clear_output
import os
from colorama import init
init()
from colorama import Fore
PLAYERS = []
HOUSE = []
def cls():
# https://stackoverflow.com/questions/517970/how-to-clear-the-interpreter-console
os.system('cls' if os.name=='nt' else 'clear')
class Card():
'''
Usage:
Create instances of each card in the deck.
As you hand them out, make sure to appropriately set
the .visible attribute to True or False based on
whether or not it should be shown to all players, such as
the Dealer's 2nd Card, or
'''
# Use the 'visible' attribute to determine
# whether or not to show this card to the
# players.
hidden = False
def __init__(self,suit,name,value, color, suit_no, name_short):
self.suit = suit
self.name = name
self.value = value
self.color = color
self.suit_no = suit_no
self.name_short = name_short
self.suit_icon = '♥♦♣♠'[suit_no]
if self.color == "Red":
self.suit_icon = Fore.RED + self.suit_icon + Fore.WHITE
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {self.name_short:<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {self.suit_icon} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {self.name_short:>2} |'
self.card_line7 = f'└───────┘'
def set_hidden(self):
self.hidden = True
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {"?":<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {"?"} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {"?":>2} |'
self.card_line7 = f'└───────┘'
def set_visible(self):
self.hidden = False
self.card_line1 = f'┌───────┐'
self.card_line2 = f'| {self.name_short:<2} |'
self.card_line3 = f'| |'
self.card_line4 = f'| {self.suit_icon} |'
self.card_line5 = f'| |'
self.card_line6 = f'| {self.name_short:>2} |'
self.card_line7 = f'└───────┘'
def show_card(self):
print(self.card_line1)
print(self.card_line2)
print(self.card_line3)
print(self.card_line4)
print(self.card_line5)
print(self.card_line6)
print(self.card_line7)
class Deck():
# Create a blank list to hold the cards that are
# added to this Deck
# cards = []
# These lists will be used to populate the Deck
suits = [("Hearts","Red",0),("Diamonds","Red",1),("Clubs","Black",2),("Spades","Black",3)]
names = [("Two",2,"2"),("Three",3,"3"),("Four",4,"4"),("Five",5,"5"),("Six",6,"6"),("Seven",7,"7"),("Eight",8,"8"),("Nine",9,"9"),("Ten",10,"10"),("Jack",10,"J"),("Queen",10,"Q"),("King",10,"K"),("Ace",11,"A")]
def __init__(self):
self.cards = []
# When the Deck class is instantiated, populate it with a fresh
# deck of cards
for suit in self.suits:
for name in self.names:
display_suit, color, suit_no = suit
display_name, value, name_short = name
card = Card(suit, display_name, value, color,suit_no, name_short)
self.cards.append(card)
# Shuffle the deck to randomize the next card.
random.shuffle(self.cards)
def hit(self,player):
# Select the next card in the deck.
# card = self.cards.pop()
card = self.cards[0]
card.set_visible()
player.add_card(card)
# Once the card has been given out, we need to make sure to
# remove it from the Deck, so that no one else gets the same
# card.
del self.cards[0]
def deal(self,at_table):
'''
Usage:
At the beginning of the game, deal the cards to each player.
'''
for card_number in [1,2]:
for player in at_table:
card = self.cards[0]
if player.player == "Dealer" and card_number == 2:
# The Dealer shows their 2nd card to all players. Let's
# make it visible.
card.set_hidden()
player.add_card(card)
# Once the card has been given out, we need to make sure to
# remove it from the Deck, so that no one else gets the same
# card.
del self.cards[0]
def show_deck(self):
for card in self.cards:
print(card.show_card())
# Use the print() command to get a message that contains the
# number of cards remaining in the deck. This is good to ensure
# that you are removing cards from the Deck as you hand them out
# to players.
def __str__(self):
return f"There are {len(self.cards)} in the deck"
# This will provide you a numeric count of cards in the deck. You
# should use this to ensure that the deck has been populated with
# the standard 52 and as you hand them out that the number is reducing
# appropriately.
def __len__(self):
return len(self.cards)
class Hand():
'''
Usage:
You should create an instance of Hand for each player in the game. It will
contain a list of the cards that the player has in their hand. Initially, the
.cards attribute will have 0 cards. You can use the .add_card() method to
pass in a card.
'''
cards = []
busted = False
amount_bet = 0
def __init__(self, player, amount_bet):
self.cards = []
self.player = player
self.amount_bet = amount_bet
# print("Hand created.")
def add_card(self, card):
'''
Pass in a Card object and don't forget to remove it from the deck afterward.
'''
self.cards.append(card)
# print(f"Card added, {self.player} now has {len(self.cards)} in their hand.")
def hand_total(self):
'''
Usage:
Call .hand_total to get a numeric representation of the current Hand value.
'''
aces = 0
hand_value = 0
for card in self.cards:
if not card.hidden:
hand_value += card.value
if card.name == "Ace":
aces += 1
if hand_value <= 21:
# print(f"NOT BUST: Hand value is: {hand_value}")
return hand_value
elif hand_value > 21 and aces == 0:
# print(f"BUST: This hand has a value of {hand_value}, which is over 21 and there are {aces} Aces.")
return hand_value
else:
# print(f"This hand has a value of {hand_value}, but has {aces} Ace(s), will attempt to convert to 1")
# while aces > 0:
while aces:
                # This hand has at least 1 Ace; we will attempt to convert it to a 1 and then
# sum the hand again to see if it is 21 or less. This will be repeated
# if there are more Aces in the hand and the sum total remains over 21.
aces -= 1
hand_value -= 10
if hand_value <= 21:
# print(f"NOT BUST: Hand value is: {hand_value}")
return hand_value
elif hand_value > 21 and aces == 0:
# print(f"BUST: This hand after changing the Aces to a value 1, is now {hand_value}, which is over 21 and it has {aces} Aces left.")
return hand_value
else:
# print("Changing another Ace to 1")
continue
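        # Worked example of the Ace demotion above: [Ace, 9, Ace] first sums
        # to 11 + 9 + 11 = 31; demoting one Ace subtracts 10 and gives 21,
        # so hand_total() returns 21 instead of reporting a bust.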
def check_bust(self):
'''
Usage:
Call .check_bust() to see if this Hand is in BUST state or not.
'''
if self.hand_total() > 21:
self.busted = True
return True
else:
self.busted = False
return False
    def show_hand(self):
        # Print the cards side by side, one row of card art at a time.
        for row in range(1, 8):
            for card in self.cards:
                print(getattr(card, f'card_line{row}'), end='', flush=True)
            print()
        print(f"{self.player} Total: {self.hand_total()}")
def __str__(self):
hand_value = 0
hand_display = []
print(f"{self.player} has the following cards:")
print("===============================================")
for card in self.cards:
hand_value += card.value
# print(f"{card.name} of {card.suit} with a value of {str(card.value)} and a visibility of {str(card.visible)}")
hand_display.append(f"{card.name} {card.suit[0]}")
        return f"{self.player}: {self.hand_total()}"
def __len__(self):
'''
Usage:
Use the len() function to return the number of cards in the Hand
'''
return len(self.cards)
def check_for_naturals(dealer, player):
'''
Usage:
Use this function after the hands have been dealt by calling
it with each player and the dealer hand. It will return one
of four values; Both, Dealer, Player, None
'''
result = None
    if dealer.hand_total() == 21 and player.hand_total() == 21:
result = "Both"
elif dealer.hand_total() == 21:
result = "Dealer"
elif player.hand_total() == 21:
result = "Player"
else:
result = "None"
return result
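# Sketch of the intended call site, right after the opening deal (the hand
# names here are hypothetical):
#
#   outcome = check_for_naturals(dealer_hand, player_hand)
#   # "Both"   -> both have a natural 21: the round is a push
#   # "Dealer" -> dealer blackjack: the player loses immediately
#   # "Player" -> player blackjack: the player wins immediately
#   # "None"   -> neither has 21: play proceeds normally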
class Player():
    def __init__(self, name):
        self.name = name
        self.win_loss_amount = 0
        self.current_hand = None
        self.previous_hands = []
    def set_current_hand(self, hand):
        # Store on the instance; assigning to Player.current_hand would
        # share one hand across every player.
        self.current_hand = hand
    def set_previous_hand(self, hand):
        self.previous_hands.append(hand)
    def get_winnings(self):
        # Note: this expects each recorded hand to carry an amount_won
        # attribute; nothing in this module sets one (hands only track
        # amount_bet), so callers must attach it before recording a hand.
        result = 0
        for hand in self.previous_hands:
            result += hand.amount_won
        return result
def __len__(self):
return len(self.previous_hands)
def __str__(self):
return f"{self.name} has played {len(self.previous_hands)} and won/loss: ${self.get_winnings()}."
def dealer_play(dealer, d):
# Make sure that all of the dealer cards are now visible and
# able to be counted.
for card in dealer.cards:
card.set_visible()
    # Have the dealer keep hitting until their hand reaches 17 or more
    # (the dealer stands on 17+; a bust also ends the loop).
    while dealer.hand_total() < 17:
# print(f"{dealer.player} has to HIT with Hand of {dealer.hand_total()}.")
# hit(dealer, d)
d.hit(dealer)
clear_output()
cls()
dealer.show_hand()
if dealer.check_bust():
print(f"{dealer.player} busted!")
# x = input("Ready to proceed")
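    # Worked example of the rule above: holding [Ten, Six] = 16 the dealer
    # must hit; drawing a Five makes 21 and the loop ends. Holding
    # [Ten, Seven] = 17 the dealer stands immediately.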
def player_play(player, dealer, d):
clear_output()
cls()
dealer.show_hand()
player.show_hand()
hit_continue = True
while player.hand_total() < 21 and hit_continue:
response = ""
while response.lower() not in ['y','yes', 'n', 'no']:
response = input(f"{player.player}: Would you like a card? Enter 'Yes', 'No'").lower()
if response[0] == 'y':
print(f"{player.player} wants card")
# hit(player, d)
d.hit(player)
clear_output()
cls()
dealer.show_hand()
player.show_hand()
if player.check_bust():
print("Sorry, you busted")
hit_continue = False
else:
print(f"{player.player} wants to 'stay'")
hit_continue = False
def get_bets(player_hands):
for player_hand in player_hands:
bet = 0
        while bet not in range(20, 101):
            try:
                bet = int(input(f"{player_hand.player}, how much would you like to bet? ($20 to $100)?"))
            except ValueError:
                print("You must enter a numeric value between 20 and 100.")
                bet = 0
        player_hand.amount_bet = bet
def play():
    # Get the Deck populated and shuffled
    d = Deck()
    if input("Would you like to review the deck, first (y/n)?") == "y":
        d.show_deck()
        input("Press any key to continue.")
    # Generate a list of Hands, one for each player
    playing_hands = []
    for hand_id in range(len(PLAYERS)):
        playing_hands.append(Hand(PLAYERS[hand_id].name, 20))
    dealer_hand = Hand("Dealer", 0)
    # Populate the player_hands and the players (including the Dealer) in the at_table lists
    player_hands = []
    at_table = []
    for playing_hand in playing_hands:
        player_hands.append(playing_hand)
        at_table.append(playing_hand)
    at_table.append(dealer_hand)
    # Deal the cards to the players.
    # deal(at_table, d)
    d.deal(at_table)
    # Ask each player to determine how much they are going to bet this round.
    get_bets(player_hands)
    # Iterate through each of the players, and allow
    # them to select as many cards as they'd like
    # until they BUST or STAY
    # print(f"# of Players playing the Dealer is: {len(players)}")
    # a = input("Ready to proceed")
    for player_hand in player_hands:
        player_play(player_hand, dealer_hand, d)
    dealer_play(dealer_hand, d)
    # Check to see who won
    clear_output()
    cls()
    dealer_hand.show_hand()
    for player_index, player_hand in enumerate(player_hands):
        player_hand.show_hand()
        print("\n===============================================")
        print(f"Checking {player_hand.player}'s results.")
        if player_hand.busted:
            print(f"{player_hand.player} BUSTED!")
            print(f"Removing ${player_hand.amount_bet} from {PLAYERS[player_index].name}'s Win/Loss balance.")
            PLAYERS[player_index].win_loss_amount -= player_hand.amount_bet
            HOUSE[0].win_loss_amount += player_hand.amount_bet
        elif dealer_hand.busted or player_hand.hand_total() > dealer_hand.hand_total():
            print(f"{player_hand.player} Won!")
            print(f"Adding ${player_hand.amount_bet} to {PLAYERS[player_index].name}'s Win/Loss balance.")
            PLAYERS[player_index].win_loss_amount += player_hand.amount_bet
            HOUSE[0].win_loss_amount -= player_hand.amount_bet
        elif player_hand.hand_total() == dealer_hand.hand_total():
            print("Push!")
        else:
            print("Dealer Won!")
            print(f"Removing ${player_hand.amount_bet} from {PLAYERS[player_index].name}'s Win/Loss balance.")
            PLAYERS[player_index].win_loss_amount -= player_hand.amount_bet
            HOUSE[0].win_loss_amount += player_hand.amount_bet
    print("Game over.")
def start_game():
    '''
    Usage:
    Call start_game() to start the game.
    '''
    ready = ""
    clear_output()
    cls()
    width = 105
    print("=" * width)
    print("=" + " " * (width - 2) + "=")
    print("=" + " " * (width - 2) + "=")
    print("=" + "Black Jack".center(width - 2) + "=")
    print("=" + "At the famous, Casino Bub".center(width - 2) + "=")
    print("=" + " " * (width - 2) + "=")
    print("=" + " " * (width - 2) + "=")
    print("=" + " " * (width - 2) + "=")
    print("=" * width + "\n\n\n")
    # Ask how many players
    # Setup player objects
    player_player1 = Player("Jason Bubnicek")
    player_player2 = Player("Dayna Bubenicek")
    PLAYERS.append(player_player1)
    PLAYERS.append(player_player2)
    HOUSE.append(Player("Dealer"))
    # Continue to ask if the player would like to play until
    # they indicate that they want to Quit the game.
    while ready.lower() not in ["q", "quit", "exit", "end"]:
        print(f"House has won: ${HOUSE[0].win_loss_amount}")
        for player in PLAYERS:
            print(f"{player.name} has won: ${player.win_loss_amount}")
        ready = input("Would you like to play? ('Yes', 'No', 'Quit')")
        if ready.lower() in ["y", "yes"]:
            print("Let's play Black Jack")
            # input("Right before play")
            play()
            ready = ""
    print("Thanks for playing!")
if __name__ == "__main__":
    start_game()
mod.rs
//! # Day 19: Go With The Flow
//!
//! With the Elves well on their way constructing the North Pole base, you turn
//! your attention back to understanding the inner workings of programming the
//! device.
//!
//! You can't help but notice that the device's opcodes don't contain any flow
//! control like jump instructions. The device's manual goes on to explain:
//!
//! "In programs where flow control is required, the instruction pointer can be
//! bound to a register so that it can be manipulated directly. This way,
//! setr/seti can function as absolute jumps, addr/addi can function as relative
//! jumps, and other opcodes can cause truly fascinating effects."
//!
//! This mechanism is achieved through a declaration like #ip 1, which would
//! modify register 1 so that accesses to it let the program indirectly access
//! the instruction pointer itself. To compensate for this kind of binding,
//! there are now six registers (numbered 0 through 5); the five not bound to
//! the instruction pointer behave as normal. Otherwise, the same rules apply as
//! the last time you worked with this device.
//!
//! When the instruction pointer is bound to a register, its value is written to
//! that register just before each instruction is executed, and the value of
//! that register is written back to the instruction pointer immediately after
//! each instruction finishes execution. Afterward, move to the next instruction
//! by adding one to the instruction pointer, even if the value in the
//! instruction pointer was just updated by an instruction. (Because of this,
//! instructions must effectively set the instruction pointer to the instruction
//! before the one they want executed next.)
//!
//! The instruction pointer is 0 during the first instruction, 1 during the
//! second, and so on. If the instruction pointer ever causes the device to
//! attempt to load an instruction outside the instructions defined in the
//! program, the program instead immediately halts. The instruction pointer
//! starts at 0.
//!
//! It turns out that this new information is already proving useful: the CPU in
//! the device is not very powerful, and a background process is occupying most
//! of its time. You dump the background process' declarations and instructions
//! to a file (your puzzle input), making sure to use the names of the opcodes
//! rather than the numbers.
//!
//! For example, suppose you have the following program:
//!
//! ```text
//! #ip 0
//! seti 5 0 1
//! seti 6 0 2
//! addi 0 1 0
//! addr 1 2 3
//! setr 1 0 0
//! seti 8 0 4
//! seti 9 0 5
//! ```
//!
//! When executed, the following instructions are executed. Each line contains
//! the value of the instruction pointer at the time the instruction started,
//! the values of the six registers before executing the instructions (in square
//! brackets), the instruction itself, and the values of the six registers after
//! executing the instruction (also in square brackets).
//!
//! ```text
//! ip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0]
//! ip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0]
//! ip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0]
//! ip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0]
//! ip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9]
//! ```
//!
//! In detail, when running this program, the following events occur:
//!
//! * The first line (#ip 0) indicates that the instruction pointer should be
//! bound to register 0 in this program. This is not an instruction, and so
//! the value of the instruction pointer does not change during the processing
//! of this line.
//! * The instruction pointer contains 0, and so the first instruction is
//! executed (seti 5 0 1). It updates register 0 to the current instruction
//! pointer value (0), sets register 1 to 5, sets the instruction pointer to
//! the value of register 0 (which has no effect, as the instruction did not
//! modify register 0), and then adds one to the instruction pointer.
//! * The instruction pointer contains 1, and so the second instruction, seti 6
//! 0 2, is executed. This is very similar to the instruction before it: 6 is
//! stored in register 2, and the instruction pointer is left with the value
//! 2.
//! * The instruction pointer is 2, which points at the instruction addi 0 1 0.
//! This is like a relative jump: the value of the instruction pointer, 2, is
//! loaded into register 0. Then, addi finds the result of adding the value in
//! register 0 and the value 1, storing the result, 3, back in register 0.
//! Register 0 is then copied back to the instruction pointer, which will
//! cause it to end up 1 larger than it would have otherwise and skip the next
//! instruction (addr 1 2 3) entirely. Finally, 1 is added to the instruction
//! pointer.
//! * The instruction pointer is 4, so the instruction setr 1 0 0 is run. This
//! is like an absolute jump: it copies the value contained in register 1, 5,
//! into register 0, which causes it to end up in the instruction pointer. The
//! instruction pointer is then incremented, leaving it at 6.
//! * The instruction pointer is 6, so the instruction seti 9 0 5 stores 9 into
//! register 5. The instruction pointer is incremented, causing it to point
//! outside the program, and so the program ends.
//!
//! What value is left in register 0 when the background process halts?
//!
//! ## Part 2
//!
//! A new background process immediately spins up in its place. It appears
//! identical, but on closer inspection, you notice that this time, register 0
//! started with the value 1.
//!
//! What value is left in register 0 when this new background process halts?
//!
//! [Advent of Code 2018 - Day 19](https://adventofcode.com/2018/day/19)
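//!
//! A compact sketch of the fetch/execute cycle described above, mirroring
//! `Interpreter::execute` below (pseudocode, not puzzle-specific):
//!
//! ```text
//! while 0 <= ip && ip < program.len():
//!     reg[ip_reg] = ip        // ip is published to the bound register
//!     execute(program[ip])    // the instruction may overwrite reg[ip_reg]
//!     ip = reg[ip_reg] + 1    // read it back, then advance by one
//! ```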
use std::{
fmt::{self, Display},
iter::FromIterator,
ops::{Index, IndexMut},
};
use crate::day16::{Data, Mnemonic};
use self::Mnemonic::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Register([Data; 6]);
impl Display for Register {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[{}, {}, {}, {}, {}, {}]",
self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5]
)
}
}
impl Default for Register {
fn default() -> Self {
Register([0; 6])
}
}
impl From<[Data; 6]> for Register {
fn from(value: [Data; 6]) -> Self {
Register(value)
}
}
impl Index<Data> for Register {
type Output = Data;
fn index(&self, index: Data) -> &Self::Output {
&self.0[index as usize]
}
}
impl IndexMut<Data> for Register {
fn index_mut(&mut self, index: Data) -> &mut <Self as Index<Data>>::Output {
&mut self.0[index as usize]
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Instruction {
pub opcode: Mnemonic,
pub a: Data,
pub b: Data,
pub c: Data,
}
impl Display for Instruction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {} {} {}", self.opcode, self.a, self.b, self.c)
}
}
impl From<(Mnemonic, Data, Data, Data)> for Instruction {
fn from((opcode, a, b, c): (Mnemonic, Data, Data, Data)) -> Self {
Self { opcode, a, b, c }
}
}
impl Instruction {
pub fn new(opcode: Mnemonic, a: Data, b: Data, c: Data) -> Self {
Self { opcode, a, b, c }
}
}
pub type Addr = Data;
#[derive(Debug, Clone, PartialEq)]
pub struct Interpreter {
ip_reg: Addr,
ip: Addr,
}
impl Interpreter {
pub fn new(ip_reg: Addr) -> Self {
Self { ip_reg, ip: 0 }
}
#[inline]
pub fn execute(&mut self, instruction: Instruction, register: &mut Register) {
register[self.ip_reg] = self.ip;
execute_mnemonic(instruction, register);
self.ip = register[self.ip_reg] + 1;
}
pub fn run(&mut self, program: &[Instruction], register: &mut Register) -> Result<(), String> {
while let Some(&instruction) = program.get(self.ip as usize) {
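            // Address 3 is the entry point of the program's hot inner loop
            // (see the decompiled listing on `optimized` below); instead of
            // interpreting that loop step by step, jump straight to its
            // closed-form result and resume at the loop exit.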
if self.ip == 3 {
self.ip = optimized(register);
continue;
}
let _c_ip = self.ip;
self.execute(instruction, register);
//_trace(_c_ip, instruction, register);
}
Ok(())
}
}
fn execute_mnemonic(Instruction { opcode, a, b, c }: Instruction, reg: &mut Register) {
match opcode {
AddR => reg[c] = reg[a] + reg[b],
AddI => reg[c] = reg[a] + b,
MulR => reg[c] = reg[a] * reg[b],
MulI => reg[c] = reg[a] * b,
BanR => reg[c] = reg[a] & reg[b],
BanI => reg[c] = reg[a] & b,
BorR => reg[c] = reg[a] | reg[b],
BorI => reg[c] = reg[a] | b,
SetR => reg[c] = reg[a],
SetI => reg[c] = a,
GtIR => reg[c] = if a > reg[b] { 1 } else { 0 },
GtRI => reg[c] = if reg[a] > b { 1 } else { 0 },
GtRR => reg[c] = if reg[a] > reg[b] { 1 } else { 0 },
EqIR => reg[c] = if a == reg[b] { 1 } else { 0 },
EqRI => reg[c] = if reg[a] == b { 1 } else { 0 },
EqRR => reg[c] = if reg[a] == reg[b] { 1 } else { 0 },
}
}
#[inline]
fn _trace(ip: Addr, Instruction { opcode, a, b, c }: Instruction, reg: &Register) {
    // Every mnemonic is traced identically, so no per-opcode match is needed.
    println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg);
}
#[derive(Debug, Clone, PartialEq)]
pub struct Program {
ip_reg: Addr,
instructions: Vec<Instruction>,
}
impl Display for Program {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "#ip {}", self.ip_reg)?;
for instruction in &self.instructions {
writeln!(f, "{}", instruction)?;
}
Ok(())
}
}
impl Program {
pub fn new(ip_reg: Addr, instructions: impl IntoIterator<Item = Instruction>) -> Self {
Self {
ip_reg,
instructions: Vec::from_iter(instructions.into_iter()),
}
}
pub fn ip_reg(&self) -> Addr {
self.ip_reg
}
pub fn instructions(&self) -> &[Instruction] {
&self.instructions
}
}
#[aoc_generator(day19)]
pub fn parse(input: &str) -> Result<Program, String> {
let mut ip_reg = 6;
let mut instructions = Vec::with_capacity(16);
for line in input.lines() {
if line.starts_with("#ip") {
ip_reg = line[4..]
.trim()
.parse::<Addr>()
.map_err(|e| e.to_string())?;
} else {
let opc = line[0..4].parse()?;
let mut oprs = line[5..]
.split(' ')
.take(3)
.map(|s| s.trim().parse::<Data>().map_err(|e| e.to_string()));
let opr1 = oprs.next().unwrap()?;
let opr2 = oprs.next().unwrap()?;
let opr3 = oprs.next().unwrap()?;
instructions.push(Instruction::new(opc, opr1, opr2, opr3));
}
}
Ok(Program::new(ip_reg, instructions))
}
#[aoc(day19, part1)]
pub fn run_background_process(program: &Program) -> Data {
let mut interpreter = Interpreter::new(program.ip_reg);
let mut register = Register::default();
interpreter
.run(program.instructions(), &mut register)
.unwrap();
register[0]
}
#[aoc(day19, part2)]
pub fn run_background_process_2(program: &Program) -> Data {
let mut interpreter = Interpreter::new(program.ip_reg);
let mut register = Register::default();
register[0] = 1;
interpreter
.run(program.instructions(), &mut register)
.unwrap();
register[0]
}
/// Repeated loop:
///
/// ```text
/// 'L1: R1 = R5 * R2
/// if R4 == R1 then
/// R1 = 1
/// R0 = R5 + R0
/// else
/// R1 = 0
/// end if
/// R3 = R1 + R3
/// R2 = R2 + 1
/// if R2 > R4 then
/// R1 = 1
/// R3 = R3 + 1 // goto 'L2
/// else
/// R1 = 0
/// R3 = R3 + R1
/// R3 = 2 // goto 'L1
/// end if
/// 'L2:
/// ```
fn optimized(reg: &mut Register) -> Addr {
if reg[4] % reg[5] == 0 {
reg[0] = reg[5] + reg[0];
}
reg[2] = reg[4];
reg[1] = 0;
12
}
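/// Illustrative closed form (not used by the solver): combined with the
/// outer loop that steps R5 from 1 up to R4, the shortcut in `optimized`
/// makes the background process accumulate into R0 the sum of all divisors
/// of the target value in R4. This sketch assumes `Data` is a primitive
/// integer type.
fn _sum_of_divisors(n: Data) -> Data {
    (1..=n).filter(|d| n % d == 0).sum()
}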
#[cfg(test)]
mod tests;
mod.rs | //! # Day 19: Go With The Flow
//!
//! With the Elves well on their way constructing the North Pole base, you turn
//! your attention back to understanding the inner workings of programming the
//! device.
//!
//! You can't help but notice that the device's opcodes don't contain any flow
//! control like jump instructions. The device's manual goes on to explain:
//!
//! "In programs where flow control is required, the instruction pointer can be
//! bound to a register so that it can be manipulated directly. This way,
//! setr/seti can function as absolute jumps, addr/addi can function as relative
//! jumps, and other opcodes can cause truly fascinating effects."
//!
//! This mechanism is achieved through a declaration like #ip 1, which would
//! modify register 1 so that accesses to it let the program indirectly access
//! the instruction pointer itself. To compensate for this kind of binding,
//! there are now six registers (numbered 0 through 5); the five not bound to
//! the instruction pointer behave as normal. Otherwise, the same rules apply as
//! the last time you worked with this device.
//!
//! When the instruction pointer is bound to a register, its value is written to
//! that register just before each instruction is executed, and the value of
//! that register is written back to the instruction pointer immediately after
//! each instruction finishes execution. Afterward, move to the next instruction
//! by adding one to the instruction pointer, even if the value in the
//! instruction pointer was just updated by an instruction. (Because of this,
//! instructions must effectively set the instruction pointer to the instruction
//! before the one they want executed next.)
//!
//! The instruction pointer is 0 during the first instruction, 1 during the
//! second, and so on. If the instruction pointer ever causes the device to
//! attempt to load an instruction outside the instructions defined in the
//! program, the program instead immediately halts. The instruction pointer
//! starts at 0.
//!
//! It turns out that this new information is already proving useful: the CPU in
//! the device is not very powerful, and a background process is occupying most
//! of its time. You dump the background process' declarations and instructions
//! to a file (your puzzle input), making sure to use the names of the opcodes
//! rather than the numbers.
//!
//! For example, suppose you have the following program:
//!
//! ```text
//! #ip 0
//! seti 5 0 1
//! seti 6 0 2
//! addi 0 1 0
//! addr 1 2 3
//! setr 1 0 0
//! seti 8 0 4
//! seti 9 0 5
//! ```
//!
//! When executed, the following instructions are executed. Each line contains
//! the value of the instruction pointer at the time the instruction started,
//! the values of the six registers before executing the instructions (in square
//! brackets), the instruction itself, and the values of the six registers after
//! executing the instruction (also in square brackets).
//!
//! ```text
//! ip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0]
//! ip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0]
//! ip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0]
//! ip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0]
//! ip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9]
//! ```
//! | //! bound to register 0 in this program. This is not an instruction, and so
//! the value of the instruction pointer does not change during the processing
//! of this line.
//! * The instruction pointer contains 0, and so the first instruction is
//! executed (seti 5 0 1). It updates register 0 to the current instruction
//! pointer value (0), sets register 1 to 5, sets the instruction pointer to
//! the value of register 0 (which has no effect, as the instruction did not
//! modify register 0), and then adds one to the instruction pointer.
//! * The instruction pointer contains 1, and so the second instruction, seti 6
//! 0 2, is executed. This is very similar to the instruction before it: 6 is
//! stored in register 2, and the instruction pointer is left with the value
//! 2.
//! * The instruction pointer is 2, which points at the instruction addi 0 1 0.
//! This is like a relative jump: the value of the instruction pointer, 2, is
//! loaded into register 0. Then, addi finds the result of adding the value in
//! register 0 and the value 1, storing the result, 3, back in register 0.
//! Register 0 is then copied back to the instruction pointer, which will
//! cause it to end up 1 larger than it would have otherwise and skip the next
//! instruction (addr 1 2 3) entirely. Finally, 1 is added to the instruction
//! pointer.
//! * The instruction pointer is 4, so the instruction setr 1 0 0 is run. This
//! is like an absolute jump: it copies the value contained in register 1, 5,
//! into register 0, which causes it to end up in the instruction pointer. The
//! instruction pointer is then incremented, leaving it at 6.
//! * The instruction pointer is 6, so the instruction seti 9 0 5 stores 9 into
//! register 5. The instruction pointer is incremented, causing it to point
//! outside the program, and so the program ends.
//!
//! What value is left in register 0 when the background process halts?
//!
//! ## Part 2
//!
//! A new background process immediately spins up in its place. It appears
//! identical, but on closer inspection, you notice that this time, register 0
//! started with the value 1.
//!
//! What value is left in register 0 when this new background process halts?
//!
//! [Advent of Code 2018 - Day 19](https://adventofcode.com/2018/day/19)
use std::{
fmt::{self, Display},
iter::FromIterator,
ops::{Index, IndexMut},
};
use crate::day16::{Data, Mnemonic};
use self::Mnemonic::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Register([Data; 6]);
impl Display for Register {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[{}, {}, {}, {}, {}, {}]",
self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5]
)
}
}
impl Default for Register {
fn default() -> Self {
Register([0; 6])
}
}
impl From<[Data; 6]> for Register {
fn from(value: [Data; 6]) -> Self {
Register(value)
}
}
impl Index<Data> for Register {
type Output = Data;
fn index(&self, index: Data) -> &Self::Output {
&self.0[index as usize]
}
}
impl IndexMut<Data> for Register {
fn index_mut(&mut self, index: Data) -> &mut <Self as Index<Data>>::Output {
&mut self.0[index as usize]
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Instruction {
pub opcode: Mnemonic,
pub a: Data,
pub b: Data,
pub c: Data,
}
impl Display for Instruction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {} {} {}", self.opcode, self.a, self.b, self.c)
}
}
impl From<(Mnemonic, Data, Data, Data)> for Instruction {
fn from((opcode, a, b, c): (Mnemonic, Data, Data, Data)) -> Self {
Self { opcode, a, b, c }
}
}
impl Instruction {
pub fn new(opcode: Mnemonic, a: Data, b: Data, c: Data) -> Self {
Self { opcode, a, b, c }
}
}
pub type Addr = Data;
#[derive(Debug, Clone, PartialEq)]
pub struct Interpreter {
ip_reg: Addr,
ip: Addr,
}
impl Interpreter {
pub fn new(ip_reg: Addr) -> Self {
Self { ip_reg, ip: 0 }
}
#[inline]
pub fn execute(&mut self, instruction: Instruction, register: &mut Register) {
register[self.ip_reg] = self.ip;
execute_mnemonic(instruction, register);
self.ip = register[self.ip_reg] + 1;
}
pub fn run(&mut self, program: &[Instruction], register: &mut Register) -> Result<(), String> {
while let Some(&instruction) = program.get(self.ip as usize) {
if self.ip == 3 {
self.ip = optimized(register);
continue;
}
let _c_ip = self.ip;
self.execute(instruction, register);
//_trace(_c_ip, instruction, register);
}
Ok(())
}
}
fn execute_mnemonic(Instruction { opcode, a, b, c }: Instruction, reg: &mut Register) {
match opcode {
AddR => reg[c] = reg[a] + reg[b],
AddI => reg[c] = reg[a] + b,
MulR => reg[c] = reg[a] * reg[b],
MulI => reg[c] = reg[a] * b,
BanR => reg[c] = reg[a] & reg[b],
BanI => reg[c] = reg[a] & b,
BorR => reg[c] = reg[a] | reg[b],
BorI => reg[c] = reg[a] | b,
SetR => reg[c] = reg[a],
SetI => reg[c] = a,
GtIR => reg[c] = if a > reg[b] { 1 } else { 0 },
GtRI => reg[c] = if reg[a] > b { 1 } else { 0 },
GtRR => reg[c] = if reg[a] > reg[b] { 1 } else { 0 },
EqIR => reg[c] = if a == reg[b] { 1 } else { 0 },
EqRI => reg[c] = if reg[a] == b { 1 } else { 0 },
EqRR => reg[c] = if reg[a] == reg[b] { 1 } else { 0 },
}
}
#[inline]
fn _trace(ip: Addr, Instruction { opcode, a, b, c }: Instruction, reg: &Register) {
match opcode {
AddR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
AddI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
MulR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
MulI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BanR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BanI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BorR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BorI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
SetR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
SetI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
GtIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
GtRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
GtRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
EqIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
EqRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
EqRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct Program {
ip_reg: Addr,
instructions: Vec<Instruction>,
}
impl Display for Program {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "#ip {}", self.ip_reg)?;
for instruction in &self.instructions {
writeln!(f, "{}", instruction)?;
}
Ok(())
}
}
impl Program {
pub fn new(ip_reg: Addr, instructions: impl IntoIterator<Item = Instruction>) -> Self {
Self {
ip_reg,
instructions: Vec::from_iter(instructions.into_iter()),
}
}
pub fn ip_reg(&self) -> Addr {
self.ip_reg
}
pub fn instructions(&self) -> &[Instruction] {
&self.instructions
}
}
#[aoc_generator(day19)]
pub fn parse(input: &str) -> Result<Program, String> {
let mut ip_reg = 6;
let mut instructions = Vec::with_capacity(16);
for line in input.lines() {
if line.starts_with("#ip") {
ip_reg = line[4..]
.trim()
.parse::<Addr>()
.map_err(|e| e.to_string())?;
} else {
let opc = line[0..4].parse()?;
let mut oprs = line[5..]
.split(' ')
.take(3)
.map(|s| s.trim().parse::<Data>().map_err(|e| e.to_string()));
let opr1 = oprs.next().unwrap()?;
let opr2 = oprs.next().unwrap()?;
let opr3 = oprs.next().unwrap()?;
instructions.push(Instruction::new(opc, opr1, opr2, opr3));
}
}
Ok(Program::new(ip_reg, instructions))
}
#[aoc(day19, part1)]
pub fn run_background_process(program: &Program) -> Data {
let mut interpreter = Interpreter::new(program.ip_reg);
let mut register = Register::default();
interpreter
.run(program.instructions(), &mut register)
.unwrap();
register[0]
}
#[aoc(day19, part2)]
pub fn run_background_process_2(program: &Program) -> Data {
let mut interpreter = Interpreter::new(program.ip_reg);
let mut register = Register::default();
register[0] = 1;
interpreter
.run(program.instructions(), &mut register)
.unwrap();
register[0]
}
/// Repeated loop:
///
/// ```text
/// 'L1: R1 = R5 * R2
/// if R4 == R1 then
/// R1 = 1
/// R0 = R5 + R0
/// else
/// R1 = 0
/// end if
/// R3 = R1 + R3
/// R2 = R2 + 1
/// if R2 > R4 then
/// R1 = 1
/// R3 = R3 + 1 // goto 'L2
/// else
/// R1 = 0
/// R3 = R3 + R1
/// R3 = 2 // goto 'L1
/// end if
/// 'L2:
/// ```
fn optimized(reg: &mut Register) -> Addr {
if reg[4] % reg[5] == 0 {
reg[0] = reg[5] + reg[0];
}
reg[2] = reg[4];
reg[1] = 0;
12
}
#[cfg(test)]
mod tests; | //! In detail, when running this program, the following events occur:
//!
//! * The first line (#ip 0) indicates that the instruction pointer should be | random_line_split |
mod.rs | //! # Day 19: Go With The Flow
//!
//! With the Elves well on their way constructing the North Pole base, you turn
//! your attention back to understanding the inner workings of programming the
//! device.
//!
//! You can't help but notice that the device's opcodes don't contain any flow
//! control like jump instructions. The device's manual goes on to explain:
//!
//! "In programs where flow control is required, the instruction pointer can be
//! bound to a register so that it can be manipulated directly. This way,
//! setr/seti can function as absolute jumps, addr/addi can function as relative
//! jumps, and other opcodes can cause truly fascinating effects."
//!
//! This mechanism is achieved through a declaration like #ip 1, which would
//! modify register 1 so that accesses to it let the program indirectly access
//! the instruction pointer itself. To compensate for this kind of binding,
//! there are now six registers (numbered 0 through 5); the five not bound to
//! the instruction pointer behave as normal. Otherwise, the same rules apply as
//! the last time you worked with this device.
//!
//! When the instruction pointer is bound to a register, its value is written to
//! that register just before each instruction is executed, and the value of
//! that register is written back to the instruction pointer immediately after
//! each instruction finishes execution. Afterward, move to the next instruction
//! by adding one to the instruction pointer, even if the value in the
//! instruction pointer was just updated by an instruction. (Because of this,
//! instructions must effectively set the instruction pointer to the instruction
//! before the one they want executed next.)
//!
//! The instruction pointer is 0 during the first instruction, 1 during the
//! second, and so on. If the instruction pointer ever causes the device to
//! attempt to load an instruction outside the instructions defined in the
//! program, the program instead immediately halts. The instruction pointer
//! starts at 0.
//!
//! It turns out that this new information is already proving useful: the CPU in
//! the device is not very powerful, and a background process is occupying most
//! of its time. You dump the background process' declarations and instructions
//! to a file (your puzzle input), making sure to use the names of the opcodes
//! rather than the numbers.
//!
//! For example, suppose you have the following program:
//!
//! ```text
//! #ip 0
//! seti 5 0 1
//! seti 6 0 2
//! addi 0 1 0
//! addr 1 2 3
//! setr 1 0 0
//! seti 8 0 4
//! seti 9 0 5
//! ```
//!
//! When executed, the following instructions are executed. Each line contains
//! the value of the instruction pointer at the time the instruction started,
//! the values of the six registers before executing the instructions (in square
//! brackets), the instruction itself, and the values of the six registers after
//! executing the instruction (also in square brackets).
//!
//! ```text
//! ip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0]
//! ip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0]
//! ip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0]
//! ip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0]
//! ip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9]
//! ```
//!
//! In detail, when running this program, the following events occur:
//!
//! * The first line (#ip 0) indicates that the instruction pointer should be
//! bound to register 0 in this program. This is not an instruction, and so
//! the value of the instruction pointer does not change during the processing
//! of this line.
//! * The instruction pointer contains 0, and so the first instruction is
//! executed (seti 5 0 1). It updates register 0 to the current instruction
//! pointer value (0), sets register 1 to 5, sets the instruction pointer to
//! the value of register 0 (which has no effect, as the instruction did not
//! modify register 0), and then adds one to the instruction pointer.
//! * The instruction pointer contains 1, and so the second instruction, seti 6
//! 0 2, is executed. This is very similar to the instruction before it: 6 is
//! stored in register 2, and the instruction pointer is left with the value
//! 2.
//! * The instruction pointer is 2, which points at the instruction addi 0 1 0.
//! This is like a relative jump: the value of the instruction pointer, 2, is
//! loaded into register 0. Then, addi finds the result of adding the value in
//! register 0 and the value 1, storing the result, 3, back in register 0.
//! Register 0 is then copied back to the instruction pointer, which will
//! cause it to end up 1 larger than it would have otherwise and skip the next
//! instruction (addr 1 2 3) entirely. Finally, 1 is added to the instruction
//! pointer.
//! * The instruction pointer is 4, so the instruction setr 1 0 0 is run. This
//! is like an absolute jump: it copies the value contained in register 1, 5,
//! into register 0, which causes it to end up in the instruction pointer. The
//! instruction pointer is then incremented, leaving it at 6.
//! * The instruction pointer is 6, so the instruction seti 9 0 5 stores 9 into
//! register 5. The instruction pointer is incremented, causing it to point
//! outside the program, and so the program ends.
//!
//! What value is left in register 0 when the background process halts?
//!
//! ## Part 2
//!
//! A new background process immediately spins up in its place. It appears
//! identical, but on closer inspection, you notice that this time, register 0
//! started with the value 1.
//!
//! What value is left in register 0 when this new background process halts?
//!
//! [Advent of Code 2018 - Day 19](https://adventofcode.com/2018/day/19)
use std::{
fmt::{self, Display},
iter::FromIterator,
ops::{Index, IndexMut},
};
use crate::day16::{Data, Mnemonic};
use self::Mnemonic::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Register([Data; 6]);
impl Display for Register {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[{}, {}, {}, {}, {}, {}]",
self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5]
)
}
}
impl Default for Register {
fn default() -> Self {
Register([0; 6])
}
}
impl From<[Data; 6]> for Register {
fn from(value: [Data; 6]) -> Self {
Register(value)
}
}
impl Index<Data> for Register {
type Output = Data;
fn index(&self, index: Data) -> &Self::Output {
&self.0[index as usize]
}
}
impl IndexMut<Data> for Register {
fn index_mut(&mut self, index: Data) -> &mut <Self as Index<Data>>::Output {
&mut self.0[index as usize]
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Instruction {
pub opcode: Mnemonic,
pub a: Data,
pub b: Data,
pub c: Data,
}
impl Display for Instruction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {} {} {}", self.opcode, self.a, self.b, self.c)
}
}
impl From<(Mnemonic, Data, Data, Data)> for Instruction {
fn from((opcode, a, b, c): (Mnemonic, Data, Data, Data)) -> Self {
Self { opcode, a, b, c }
}
}
impl Instruction {
pub fn new(opcode: Mnemonic, a: Data, b: Data, c: Data) -> Self {
Self { opcode, a, b, c }
}
}
pub type Addr = Data;
#[derive(Debug, Clone, PartialEq)]
pub struct Interpreter {
ip_reg: Addr,
ip: Addr,
}
impl Interpreter {
pub fn new(ip_reg: Addr) -> Self {
Self { ip_reg, ip: 0 }
}
#[inline]
pub fn execute(&mut self, instruction: Instruction, register: &mut Register) {
register[self.ip_reg] = self.ip;
execute_mnemonic(instruction, register);
self.ip = register[self.ip_reg] + 1;
}
pub fn run(&mut self, program: &[Instruction], register: &mut Register) -> Result<(), String> {
while let Some(&instruction) = program.get(self.ip as usize) {
if self.ip == 3 {
self.ip = optimized(register);
continue;
}
let _c_ip = self.ip;
self.execute(instruction, register);
//_trace(_c_ip, instruction, register);
}
Ok(())
}
}
fn execute_mnemonic(Instruction { opcode, a, b, c }: Instruction, reg: &mut Register) {
match opcode {
AddR => reg[c] = reg[a] + reg[b],
AddI => reg[c] = reg[a] + b,
MulR => reg[c] = reg[a] * reg[b],
MulI => reg[c] = reg[a] * b,
BanR => reg[c] = reg[a] & reg[b],
BanI => reg[c] = reg[a] & b,
BorR => reg[c] = reg[a] | reg[b],
BorI => reg[c] = reg[a] | b,
SetR => reg[c] = reg[a],
SetI => reg[c] = a,
GtIR => reg[c] = if a > reg[b] { 1 } else { 0 },
GtRI => reg[c] = if reg[a] > b { 1 } else { 0 },
GtRR => reg[c] = if reg[a] > reg[b] { 1 } else { 0 },
EqIR => reg[c] = if a == reg[b] { 1 } else { 0 },
EqRI => reg[c] = if reg[a] == b { 1 } else { 0 },
EqRR => reg[c] = if reg[a] == reg[b] { 1 } else | ,
}
}
#[inline]
fn _trace(ip: Addr, Instruction { opcode, a, b, c }: Instruction, reg: &Register) {
match opcode {
AddR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
AddI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
MulR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
MulI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BanR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BanI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BorR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
BorI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
SetR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
SetI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
GtIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
GtRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
GtRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
EqIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
EqRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
EqRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg),
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct Program {
ip_reg: Addr,
instructions: Vec<Instruction>,
}
impl Display for Program {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "#ip {}", self.ip_reg)?;
for instruction in &self.instructions {
writeln!(f, "{}", instruction)?;
}
Ok(())
}
}
impl Program {
pub fn new(ip_reg: Addr, instructions: impl IntoIterator<Item = Instruction>) -> Self {
Self {
ip_reg,
instructions: Vec::from_iter(instructions.into_iter()),
}
}
pub fn ip_reg(&self) -> Addr {
self.ip_reg
}
pub fn instructions(&self) -> &[Instruction] {
&self.instructions
}
}
#[aoc_generator(day19)]
pub fn parse(input: &str) -> Result<Program, String> {
let mut ip_reg = 6;
let mut instructions = Vec::with_capacity(16);
for line in input.lines() {
if line.starts_with("#ip") {
ip_reg = line[4..]
.trim()
.parse::<Addr>()
.map_err(|e| e.to_string())?;
} else {
let opc = line[0..4].parse()?;
let mut oprs = line[5..]
.split(' ')
.take(3)
.map(|s| s.trim().parse::<Data>().map_err(|e| e.to_string()));
let opr1 = oprs.next().unwrap()?;
let opr2 = oprs.next().unwrap()?;
let opr3 = oprs.next().unwrap()?;
instructions.push(Instruction::new(opc, opr1, opr2, opr3));
}
}
Ok(Program::new(ip_reg, instructions))
}
#[aoc(day19, part1)]
pub fn run_background_process(program: &Program) -> Data {
let mut interpreter = Interpreter::new(program.ip_reg);
let mut register = Register::default();
interpreter
.run(program.instructions(), &mut register)
.unwrap();
register[0]
}
#[aoc(day19, part2)]
pub fn run_background_process_2(program: &Program) -> Data {
let mut interpreter = Interpreter::new(program.ip_reg);
let mut register = Register::default();
register[0] = 1;
interpreter
.run(program.instructions(), &mut register)
.unwrap();
register[0]
}
/// Repeated loop:
///
/// ```text
/// 'L1: R1 = R5 * R2
/// if R4 == R1 then
/// R1 = 1
/// R0 = R5 + R0
/// else
/// R1 = 0
/// end if
/// R3 = R1 + R3
/// R2 = R2 + 1
/// if R2 > R4 then
/// R1 = 1
/// R3 = R3 + 1 // goto 'L2
/// else
/// R1 = 0
/// R3 = R3 + R1
/// R3 = 2 // goto 'L1
/// end if
/// 'L2:
/// ```
fn optimized(reg: &mut Register) -> Addr {
if reg[4] % reg[5] == 0 {
reg[0] = reg[5] + reg[0];
}
reg[2] = reg[4];
reg[1] = 0;
12
}
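// A minimal sketch, not part of the original solution: `optimized` above
// short-circuits one pass of the inner loop, which tests a single divisor
// candidate of reg[4]. Taken as a whole the program sums the divisors of
// reg[4] into reg[0], so (assuming `Data` is an unsigned integer type) the
// computation reduces to a closed form like this:
fn _sum_of_divisors(n: Data) -> Data {
    let mut sum = 0;
    let mut d = 1;
    while d * d <= n {
        if n % d == 0 {
            sum += d;
            let q = n / d;
            if q != d {
                sum += q;
            }
        }
        d += 1;
    }
    sum
}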
#[cfg(test)]
mod tests;
| { 0 } | conditional_block |
outputschedule.py | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.common import switchboard
from ooflib.SWIG.engine import ooferror2
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import utils
from ooflib.common import registeredclass
from ooflib.common.IO import automatic
from ooflib.common.IO import datafile
from ooflib.common.IO import parameter
from ooflib.common.IO import reporter
from ooflib.common.IO import xmlmenudump
import math
################
# The ScheduleType class determines how an output schedule behaves.
# This is separate from the Schedule class because some Schedules can
# work with more than one ScheduleType.
class ScheduleType(registeredclass.RegisteredClass):
registry = []
tip="How output Schedules are interpreted."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/scheduletype.xml')
class AbsoluteOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(0.0)
registeredclass.Registration(
'Absolute',
ScheduleType,
AbsoluteOutputSchedule,
ordering=0,
tip="Output schedule times are absolute.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/absolutesched.xml')
)
class RelativeOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(time0)
registeredclass.Registration(
'Relative',
ScheduleType,
RelativeOutputSchedule,
ordering=1,
tip="Output schedule times are relative to the start time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/relativesched.xml')
)
class ConditionalOutputSchedule(ScheduleType):
conditional = True
def setOffset(self, schedule, time0):
pass
registeredclass.Registration(
'Conditional',
ScheduleType,
ConditionalOutputSchedule,
ordering=2,
tip="Output schedule times are determined on the fly by a given criterion.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/reg/conditionalsched.xml')
)
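def _scheduletype_demo():
    # A minimal sketch, not part of the original module: how the three
    # ScheduleTypes set a schedule's time origin for a solution starting at
    # time0=2.0.  With Once(1.5) the output lands at t=1.5 (Absolute) or
    # t=3.5 (Relative); Conditional schedules have no time origin to adjust.
    sched = Once(1.5)           # Once is defined later in this module
    AbsoluteOutputSchedule().setOffset(sched, 2.0)
    assert sched.time0 == 0.0
    RelativeOutputSchedule().setOffset(sched, 2.0)
    assert sched.time0 == 2.0
    ConditionalOutputSchedule().setOffset(sched, 2.0)  # no-op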
######################
# The Schedule class determines when scheduled output is produced.
# UnconditionalSchedules determine the output times in advance.
# ConditionalSchedules do it on the fly.
class Schedule(registeredclass.RegisteredClass):
registry = []
tip="Ways to specify when Scheduled Output will be produced."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/schedule.xml')
def __init__(self):
self.time0 = 0.0
def reset(self, continuing):
pass
def singleTime(self):
# In subclasses this should return the time if the Schedule
# contains only a single time, or None if the Schedule
# contains multiple times. If it contains a single unknown
# time (which probably doesn't make sense) it should return
# None. This is used to distinguish between static and
# quasistatic evolutions.
return None
def setOffset(self, time0): # adjust for relative vs absolute scheduletypes
self.time0 = time0
class UnconditionalSchedule(Schedule):
conditional = False
def __iter__(self):
return self
class ConditionalSchedule(Schedule):
conditional = True
# The OutputScheduleParameter has a widget (defined in
# engine.IO.GUI.schedulewidget) that lists only the Schedules that are
# compatible with the current ScheduleType.
class OutputScheduleParameter(parameter.RegisteredParameter):
def __init__(self, name, value=None, default=None, tip=None, auxData={}):
parameter.RegisteredParameter.__init__(self, name,
reg=Schedule,
value=value, default=default,
tip=tip, auxData=auxData)
def clone(self):
return self.__class__(self.name, self.value, self.default, self.tip)
#######################
class Once(UnconditionalSchedule):
def __init__(self, time):
self.time = time
self.done = False
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.done = False
def next(self):
if self.done:
raise StopIteration
self.done = True
return self.time + self.time0
def singleTime(self):
return self.time
registeredclass.Registration(
'Once',
Schedule,
Once,
ordering=1,
params=[parameter.FloatParameter('time', 0.0, tip='The output time.')],
tip="Produce output at just one time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/once.xml'))
class Periodic(UnconditionalSchedule):
def __init__(self, delay, interval):
self.delay = delay
self.interval = interval
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.count = 0
def next(self):
t = self.time0 + self.delay + self.count*self.interval
self.count += 1
return t
registeredclass.Registration(
'Periodic',
Schedule,
Periodic,
ordering=0,
params=[
parameter.NonNegativeFloatParameter(
'delay', 0.0, tip='Time before the first output.'),
parameter.PositiveFloatParameter(
'interval', 0.0, tip='Time between outputs.')
],
tip="Produce evenly spaced periodic output.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/periodicsched.xml'))
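def _periodic_demo():
    # A minimal sketch, not part of the original module: a Periodic schedule
    # yields delay, delay+interval, delay+2*interval, ... offset by time0.
    sched = Periodic(1.0, 0.5)
    return [sched.next() for _ in range(3)]  # [1.0, 1.5, 2.0]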
class Geometric(UnconditionalSchedule):
def __init__(self, timestep, factor):
self.timestep = timestep
self.factor = factor
self.nextstep = timestep
self.lasttime = None
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.nextstep = self.timestep
self.lasttime = None
def next(self):
## TODO 3.1: This doesn't quite do the right thing on the first
## step of a continued computation, if the previous step was
## truncated to hit the end time exactly. For example, with
## factor=2 and timestep=0.1, the times are 0.1, 0.3, 0.7,
## 1.5, etc. If the end time is 1, the output times will be
## 0.1, 0.3, 0.7, and 1.0. If the solution is continued, the
## next time will be 3.1, although it probably should be 1.5.
if self.lasttime is None:
self.lasttime = self.time0
self.lasttime += self.nextstep
self.nextstep *= self.factor
debug.fmsg("Geometric returning", self.lasttime)
return self.lasttime
registeredclass.Registration(
'Geometric',
Schedule,
Geometric,
ordering=0.5,
params=[
parameter.PositiveFloatParameter(
'timestep', 1.0, tip='Initial timestep.'),
parameter.PositiveFloatParameter(
'factor', 2.0,
tip='Factor by which to increase the timestep on each step.')
],
tip="Increase timestep by a fixed factor on each step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/geomsched.xml'))
class SpecifiedTimes(UnconditionalSchedule):
def __init__(self, times):
self.times = times[:]
if self.times:
self.times.sort()
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.count = 0
def next(self):
if self.count == len(self.times):
raise StopIteration
t = self.times[self.count] + self.time0
self.count += 1
return t
def singleTime(self):
if len(self.times) == 1:
return self.times[0] + self.time0
return None
registeredclass.Registration(
'Specified Times',
Schedule,
SpecifiedTimes,
ordering=2,
params=[
parameter.ListOfFloatsParameter('times', [],
tip="Produce output at these times.")],
tip="Produce ouput at the specified times.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/specifiedsched.xml')
)
class EveryTime(ConditionalSchedule):
def condition(self, meshctxt, time):
return True
registeredclass.Registration(
'Every Time',
Schedule,
EveryTime,
ordering=10,
tip="Produce output at every time step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/everytime.xml'))
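class _AfterTime(ConditionalSchedule):
    # A minimal sketch, not part of the original module: a hypothetical
    # conditional schedule illustrating the condition() protocol -- produce
    # output at every step once a threshold time has been reached.  A real
    # Schedule would also need a registeredclass.Registration.
    def __init__(self, threshold):
        self.threshold = threshold
        ConditionalSchedule.__init__(self)
    def condition(self, meshctxt, time):
        return time >= self.threshold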
##################################################
class OutputSchedule(object):
def __init__(self, meshcontext):
# outputs is a set of ScheduledOutput objects. nexttimes is
# a dictionary, keyed by ScheduledOutput objects, giving the
# next time that each S.O. should be performed. When a
# S.O. has no more scheduled times, it is removed from the
# dictionary. After a set of outputs have been performed, the
# outputs to be performed at the next step are stored in
# nextoutputs.
# The order in which outputs are performed is arbitrary.
self.meshcontext = meshcontext
self.removeAll() # initializes self.outputs, etc.
def nOutputs(self):
return len(self.outputs)
def removeAll(self):
self.outputs = [] # list, so GUI can present them in creation order
## Use OrderedSets and OrderedDicts instead of sets and dicts
## so that the order of outputs is repeatable, which makes
## testing much easier.
self.nexttimes = utils.OrderedDict() # next time to perform each output
self.nextoutputs = utils.OrderedSet() # outputs to perform at next time
self.conditionalOutputs = utils.OrderedSet()
self.nexttime = None
self.finished = set()
def add(self, name, output):
output.setName(name)
self.outputs.append(output)
def replace(self, name, output, scheduletype, schedule, destination):
for i, o in enumerate(self.outputs):
if o.name() == name:
output.setSchedule(schedule, scheduletype)
output.setDestination(destination)
# If the old output was given an automatically
# generated name, the name must be updated to reflect
# changes in the output.
if isinstance(o.name(), automatic.AutomaticName):
output.setName(automatic.AutomaticName(
self.uniqueName(output.defaultName(), exclude=name)
))
else:
output.setName(name)
self.outputs[i] = output
return
raise ValueError("No such scheduled output: " + name)
def rename(self, oldname, newname):
output = self.getByName(oldname)
newname = self.uniqueName(newname, exclude=oldname)
# Reassign even if oldname==newname, since one of them may be
# an AutomaticName.
output.setName(newname)
def remove(self, outputname):
self.outputs.remove(self.getByName(outputname))
def size(self):
return len(self.outputs)
def isConditional(self):
return len(self.conditionalOutputs) > 0
def isSingleTime(self):
# Returns True if all of the outputs operate just once (or not
# at all), and they do it at the same time. Used to
# distinguish between static and quasistatic time evolutions.
times = [output.schedule.singleTime() for output in self.outputs
if output.active and output.fullyDefined()]
firsttime = None # first time found in list
for time in times:
if time is None: # singleTime indicated multiple times
return False
if firsttime is None:
firsttime = time
if time != firsttime:
return False
return True
def getByName(self, name):
for o in self.outputs:
if o.name() == name:
return o
raise ValueError("No such scheduled output: " + name)
def uniqueName(self, name, exclude=None):
return utils.uniqueName(name, self.names(), exclude)
def names(self):
return [o.name() for o in self.outputs]
def reset(self, time0, continuing):
# Reset all output schedules, and advance them to the first
# time after time0 (which is the earliest time in the current
# evolution).
self.finished.clear()
self.nexttimes = utils.OrderedDict()
self.nexttime = None
self.nextoutputs.clear()
self.conditionalOutputs.clear()
for output in self.outputs:
if (output.active and output.fullyDefined()):
output.schedule.reset(continuing)
output.start(self.meshcontext, time0, continuing)
if output.schedule.conditional:
self.conditionalOutputs.add(output)
else:
try:
t = roundOffCheck(output.schedule.next(), time0)
if continuing:
while t <= time0:
t = output.schedule.next()
else:
while t < time0:
t = output.schedule.next()
except StopIteration:
# The schedule has no times in it later than time0.
# Just ignore it.
pass
else:
self.nexttimes[output] = t
if self.nexttime is None or t < self.nexttime:
self.nexttime = t
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
def times(self, endtime):
if not self.nexttimes:
self.nexttime = endtime
yield endtime
while self.nexttimes:
self.nexttime = min(self.nexttimes.values()) # min over all Outputs
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
yield self.nexttime
# Update next times for the outputs that have just been
# performed. If output.schedule.next() raises
# StopIteration, it's finished.
for output in self.nextoutputs:
try:
self.nexttimes[output] = roundOffCheck(
output.schedule.next(), endtime)
except StopIteration:
del self.nexttimes[output]
self.finished.add(output)
# end while self.nexttimes
if endtime != self.nexttime:
self.nextoutputs.clear()
yield endtime
def perform(self, time):
# perform() can be called before self.nexttime if there are
# conditional outputs, so we have to check the time here.
if time == self.nexttime:
for output in self.nextoutputs:
# No need to check output.active here. Only active
# outputs are in nextoutputs.
output.perform(self.meshcontext, time)
for output in self.conditionalOutputs:
if output.schedule.condition(self.meshcontext, time):
if output.active:
output.perform(self.meshcontext, time)
def finish(self):
for output in self.finished:
output.finish(self.meshcontext)
for output in self.nexttimes:
output.finish(self.meshcontext)
for output in self.conditionalOutputs:
# Only active outputs are in nexttimes or in finished, but
# inactive ones may be in conditionalOutputs, so we have
# to check.
if output.active:
output.finish(self.meshcontext)
    def saveAll(self, datafile, meshctxt):
        for output in self.outputs:
            output.save(datafile, meshctxt)
def roundOffCheck(val, target):
if abs(val - target) < 10.*utils.machine_epsilon*target:
return target
return val
| for output in self.outputs:
output.save(datafile, meshctxt) | identifier_body |
outputschedule.py | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.common import switchboard
from ooflib.SWIG.engine import ooferror2
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import utils
from ooflib.common import registeredclass
from ooflib.common.IO import automatic
from ooflib.common.IO import datafile
from ooflib.common.IO import parameter
from ooflib.common.IO import reporter
from ooflib.common.IO import xmlmenudump
import math
################
# The ScheduleType class determines how an output schedule behaves.
# This is separate from the Schedule class because some Schedules can
# work with more than one ScheduleType.
class ScheduleType(registeredclass.RegisteredClass):
registry = []
tip="How output Schedules are interpreted."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/scheduletype.xml')
class AbsoluteOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(0.0)
registeredclass.Registration(
'Absolute',
ScheduleType,
AbsoluteOutputSchedule,
ordering=0,
tip="Output schedule times are absolute.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/absolutesched.xml')
)
class RelativeOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(time0)
registeredclass.Registration(
'Relative',
ScheduleType,
RelativeOutputSchedule,
ordering=1,
tip="Output schedule times are relative to the start time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/relativesched.xml')
)
class ConditionalOutputSchedule(ScheduleType):
conditional = True
def setOffset(self, schedule, time0):
pass
registeredclass.Registration(
'Conditional',
ScheduleType,
ConditionalOutputSchedule,
ordering=2,
tip="Output schedule times are determined on the fly by a given criterion.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/reg/conditionalsched.xml')
)
######################
# The Schedule class determines when scheduled output is produced.
# UnconditionalSchedules determine the output times in advance.
# ConditionalSchedules do it on the fly.
class Schedule(registeredclass.RegisteredClass):
registry = []
tip="Ways to specify when Scheduled Output will be produced."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/schedule.xml')
def __init__(self):
self.time0 = 0.0
def reset(self, continuing):
pass
def singleTime(self):
# In subclasses this should return the time if the Schedule
# contains only a single time, or None if the Schedule
# contains multiple times. If it contains a single unknown
# time (which probably doesn't make sense) it should return
# None. This is used to distinguish between static and
# quasistatic evolutions.
return None
def setOffset(self, time0): # adjust for relative vs absolute scheduletypes
self.time0 = time0
class UnconditionalSchedule(Schedule):
conditional = False
def __iter__(self):
return self
class ConditionalSchedule(Schedule):
conditional = True
# The OutputScheduleParameter has a widget (defined in
# engine.IO.GUI.schedulewidget) that lists only the Schedules that are
# compatible with the current ScheduleType.
class OutputScheduleParameter(parameter.RegisteredParameter):
def __init__(self, name, value=None, default=None, tip=None, auxData={}):
parameter.RegisteredParameter.__init__(self, name,
reg=Schedule,
value=value, default=default,
tip=tip, auxData=auxData)
def clone(self):
return self.__class__(self.name, self.value, self.default, self.tip)
#######################
class Once(UnconditionalSchedule):
def __init__(self, time):
self.time = time
self.done = False
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.done = False
def next(self):
if self.done:
raise StopIteration
self.done = True
return self.time + self.time0
def singleTime(self):
return self.time
registeredclass.Registration(
'Once',
Schedule,
Once,
ordering=1,
params=[parameter.FloatParameter('time', 0.0, tip='The output time.')],
tip="Produce output at just one time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/once.xml'))
class Periodic(UnconditionalSchedule):
def __init__(self, delay, interval):
self.delay = delay
self.interval = interval
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.count = 0
def next(self):
t = self.time0 + self.delay + self.count*self.interval
self.count += 1
return t
registeredclass.Registration(
'Periodic',
Schedule,
Periodic,
ordering=0,
params=[
parameter.NonNegativeFloatParameter(
'delay', 0.0, tip='Time before the first output.'),
parameter.PositiveFloatParameter(
'interval', 0.0, tip='Time between outputs.')
],
tip="Produce evenly spaced periodic output.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/periodicsched.xml'))
class Geometric(UnconditionalSchedule):
def __init__(self, timestep, factor):
self.timestep = timestep
self.factor = factor
self.nextstep = timestep
self.lasttime = None
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.nextstep = self.timestep
self.lasttime = None
def next(self):
## TODO 3.1: This doesn't quite do the right thing on the first
## step of a continued computation, if the previous step was
## truncated to hit the end time exactly. For example, with
## factor=2 and timestep=0.1, the times are 0.1, 0.3, 0.7,
## 1.5, etc. If the end time is 1, the output times will be
## 0.1, 0.3, 0.7, and 1.0. If the solution is continued, the
## next time will be 3.1, although it probably should be 1.5.
if self.lasttime is None:
self.lasttime = self.time0
self.lasttime += self.nextstep
self.nextstep *= self.factor
debug.fmsg("Geometric returning", self.lasttime)
return self.lasttime
registeredclass.Registration(
'Geometric',
Schedule,
Geometric,
ordering=0.5,
params=[
parameter.PositiveFloatParameter(
'timestep', 1.0, tip='Initial timestep.'),
parameter.PositiveFloatParameter(
'factor', 2.0,
tip='Factor by which to increase the timestep on each step.')
],
tip="Increase timestep by a fixed factor on each step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/geomsched.xml'))
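def _geometric_demo():
    # A minimal sketch, not part of the original module: reproduces the
    # sequence from the TODO note in Geometric.next() -- timestep=0.1 and
    # factor=2 give output times of roughly 0.1, 0.3, 0.7, 1.5.
    sched = Geometric(0.1, 2.0)
    return [sched.next() for _ in range(4)]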
class SpecifiedTimes(UnconditionalSchedule):
def __init__(self, times):
self.times = times[:]
if self.times:
self.times.sort()
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.count = 0
def next(self):
if self.count == len(self.times):
raise StopIteration
t = self.times[self.count] + self.time0
self.count += 1
return t
def singleTime(self):
if len(self.times) == 1:
return self.times[0] + self.time0
return None
registeredclass.Registration(
'Specified Times',
Schedule,
SpecifiedTimes,
ordering=2,
params=[
parameter.ListOfFloatsParameter('times', [],
tip="Produce output at these times.")],
tip="Produce ouput at the specified times.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/specifiedsched.xml')
)
class EveryTime(ConditionalSchedule):
def condition(self, meshctxt, time):
return True
registeredclass.Registration(
'Every Time',
Schedule,
EveryTime,
ordering=10,
tip="Produce output at every time step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/everytime.xml'))
##################################################
class OutputSchedule(object):
def __init__(self, meshcontext):
# outputs is a set of ScheduledOutput objects. nexttimes is
# a dictionary, keyed by ScheduledOutput objects, giving the
# next time that each S.O. should be performed. When a
# S.O. has no more scheduled times, it is removed from the
# dictionary. After a set of outputs have been performed, the
# outputs to be performed at the next step are stored in
# nextoutputs.
# The order in which outputs are performed is arbitrary.
self.meshcontext = meshcontext
self.removeAll() # initializes self.outputs, etc.
def nOutputs(self):
return len(self.outputs)
def removeAll(self):
self.outputs = [] # list, so GUI can present them in creation order
## Use OrderedSets and OrderedDicts instead of sets and dicts
## so that the order of outputs is repeatable, which makes
## testing much easier.
self.nexttimes = utils.OrderedDict() # next time to perform each output
self.nextoutputs = utils.OrderedSet() # outputs to perform at next time
self.conditionalOutputs = utils.OrderedSet()
self.nexttime = None
self.finished = set()
def add(self, name, output):
output.setName(name)
self.outputs.append(output)
def replace(self, name, output, scheduletype, schedule, destination):
for i, o in enumerate(self.outputs):
if o.name() == name:
output.setSchedule(schedule, scheduletype)
output.setDestination(destination)
# If the old output was given an automatically
# generated name, the name must be updated to reflect
# changes in the output.
if isinstance(o.name(), automatic.AutomaticName):
output.setName(automatic.AutomaticName(
self.uniqueName(output.defaultName(), exclude=name)
))
else:
output.setName(name)
self.outputs[i] = output
return
raise ValueError("No such scheduled output: " + name)
def rename(self, oldname, newname):
output = self.getByName(oldname)
newname = self.uniqueName(newname, exclude=oldname)
# Reassign even if oldname==newname, since one of them may be
# an AutomaticName.
output.setName(newname)
def remove(self, outputname):
self.outputs.remove(self.getByName(outputname))
def size(self):
return len(self.outputs)
def isConditional(self):
return len(self.conditionalOutputs) > 0
def isSingleTime(self):
# Returns True if all of the outputs operate just once (or not
# at all), and they do it at the same time. Used to
# distinguish between static and quasistatic time evolutions.
times = [output.schedule.singleTime() for output in self.outputs
if output.active and output.fullyDefined()]
firsttime = None # first time found in list
for time in times:
if time is None: # singleTime indicated multiple times
return False
if firsttime is None:
firsttime = time
if time != firsttime:
return False
return True
def getByName(self, name):
for o in self.outputs:
if o.name() == name:
return o
raise ValueError("No such scheduled output: " + name)
def uniqueName(self, name, exclude=None):
return utils.uniqueName(name, self.names(), exclude)
    def names(self):
return [o.name() for o in self.outputs]
def reset(self, time0, continuing):
# Reset all output schedules, and advance them to the first
# time after time0 (which is the earliest time in the current
# evolution).
self.finished.clear()
self.nexttimes = utils.OrderedDict()
self.nexttime = None
self.nextoutputs.clear()
self.conditionalOutputs.clear()
for output in self.outputs:
if (output.active and output.fullyDefined()):
output.schedule.reset(continuing)
output.start(self.meshcontext, time0, continuing)
if output.schedule.conditional:
self.conditionalOutputs.add(output)
else:
try:
t = roundOffCheck(output.schedule.next(), time0)
if continuing:
while t <= time0:
t = output.schedule.next()
else:
while t < time0:
t = output.schedule.next()
except StopIteration:
# The schedule has no times in it later than time0.
# Just ignore it.
pass
else:
self.nexttimes[output] = t
if self.nexttime is None or t < self.nexttime:
self.nexttime = t
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
def times(self, endtime):
if not self.nexttimes:
self.nexttime = endtime
yield endtime
while self.nexttimes:
self.nexttime = min(self.nexttimes.values()) # min over all Outputs
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
yield self.nexttime
# Update next times for the outputs that have just been
# performed. If output.schedule.next() raises
# StopIteration, it's finished.
for output in self.nextoutputs:
try:
self.nexttimes[output] = roundOffCheck(
output.schedule.next(), endtime)
except StopIteration:
del self.nexttimes[output]
self.finished.add(output)
# end while self.nexttimes
if endtime != self.nexttime:
self.nextoutputs.clear()
yield endtime
def perform(self, time):
# perform() can be called before self.nexttime if there are
# conditional outputs, so we have to check the time here.
if time == self.nexttime:
for output in self.nextoutputs:
# No need to check output.active here. Only active
# outputs are in nextoutputs.
output.perform(self.meshcontext, time)
for output in self.conditionalOutputs:
if output.schedule.condition(self.meshcontext, time):
if output.active:
output.perform(self.meshcontext, time)
def finish(self):
for output in self.finished:
output.finish(self.meshcontext)
for output in self.nexttimes:
output.finish(self.meshcontext)
for output in self.conditionalOutputs:
# Only active outputs are in nexttimes or in finished, but
# inactive ones may be in conditionalOutputs, so we have
# to check.
if output.active:
output.finish(self.meshcontext)
def saveAll(self, datafile, meshctxt):
for output in self.outputs:
output.save(datafile, meshctxt)
def roundOffCheck(val, target):
if abs(val - target) < 10.*utils.machine_epsilon*target:
return target
return val
| names | identifier_name |
outputschedule.py | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.common import switchboard
from ooflib.SWIG.engine import ooferror2
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import utils
from ooflib.common import registeredclass
from ooflib.common.IO import automatic
from ooflib.common.IO import datafile
from ooflib.common.IO import parameter
from ooflib.common.IO import reporter
from ooflib.common.IO import xmlmenudump
import math
################
# The ScheduleType class determines how an output schedule behaves.
# This is separate from the Schedule class because some Schedules can
# work with more than one ScheduleType.
class ScheduleType(registeredclass.RegisteredClass):
registry = []
tip="How output Schedules are interpreted."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/scheduletype.xml')
class AbsoluteOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(0.0)
registeredclass.Registration(
'Absolute',
ScheduleType,
AbsoluteOutputSchedule,
ordering=0,
tip="Output schedule times are absolute.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/absolutesched.xml')
)
class RelativeOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(time0)
registeredclass.Registration(
'Relative',
ScheduleType,
RelativeOutputSchedule,
ordering=1,
tip="Output schedule times are relative to the start time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/relativesched.xml')
)
class ConditionalOutputSchedule(ScheduleType):
conditional = True
def setOffset(self, schedule, time0):
pass
registeredclass.Registration(
'Conditional',
ScheduleType,
ConditionalOutputSchedule,
ordering=2,
tip="Output schedule times are determined on the fly by a given criterion.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/reg/conditionalsched.xml')
)
######################
# The Schedule class determines when scheduled output is produced.
# UnconditionalSchedules determine the output times in advance.
# ConditionalSchedules do it on the fly.
class Schedule(registeredclass.RegisteredClass):
registry = []
tip="Ways to specify when Scheduled Output will be produced."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/schedule.xml')
def __init__(self):
self.time0 = 0.0
def reset(self, continuing):
pass
def singleTime(self):
# In subclasses this should return the time if the Schedule
# contains only a single time, or None if the Schedule
# contains multiple times. If it contains a single unknown
# time (which probably doesn't make sense) it should return
# None. This is used to distinguish between static and
# quasistatic evolutions.
return None
def setOffset(self, time0): # adjust for relative vs absolute scheduletypes
self.time0 = time0
class UnconditionalSchedule(Schedule):
conditional = False
def __iter__(self):
return self
class ConditionalSchedule(Schedule):
conditional = True
# The OutputScheduleParameter has a widget (defined in
# engine.IO.GUI.schedulewidget) that lists only the Schedules that are
# compatible with the current ScheduleType.
class OutputScheduleParameter(parameter.RegisteredParameter):
def __init__(self, name, value=None, default=None, tip=None, auxData={}):
parameter.RegisteredParameter.__init__(self, name,
reg=Schedule,
value=value, default=default,
tip=tip, auxData=auxData)
def clone(self):
return self.__class__(self.name, self.value, self.default, self.tip)
#######################
class Once(UnconditionalSchedule):
def __init__(self, time):
self.time = time
self.done = False
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.done = False
def next(self):
if self.done:
raise StopIteration
self.done = True
return self.time + self.time0
def singleTime(self):
return self.time
registeredclass.Registration(
'Once',
Schedule,
Once,
ordering=1,
params=[parameter.FloatParameter('time', 0.0, tip='The output time.')],
tip="Produce output at just one time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/once.xml'))
class Periodic(UnconditionalSchedule):
def __init__(self, delay, interval):
self.delay = delay
self.interval = interval
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.count = 0
def next(self):
t = self.time0 + self.delay + self.count*self.interval
self.count += 1
return t
registeredclass.Registration(
'Periodic',
Schedule,
Periodic,
ordering=0,
params=[
parameter.NonNegativeFloatParameter(
'delay', 0.0, tip='Time before the first output.'),
parameter.PositiveFloatParameter(
'interval', 0.0, tip='Time between outputs.')
],
tip="Produce evenly spaced periodic output.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/periodicsched.xml'))
class Geometric(UnconditionalSchedule):
def __init__(self, timestep, factor):
self.timestep = timestep
self.factor = factor
self.nextstep = timestep
self.lasttime = None
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.nextstep = self.timestep
self.lasttime = None
def next(self):
## TODO 3.1: This doesn't quite do the right thing on the first
## step of a continued computation, if the previous step was
## truncated to hit the end time exactly. For example, with
## factor=2 and timestep=0.1, the times are 0.1, 0.3, 0.7,
## 1.5, etc. If the end time is 1, the output times will be
## 0.1, 0.3, 0.7, and 1.0. If the solution is continued, the
## next time will be 3.1, although it probably should be 1.5.
if self.lasttime is None:
self.lasttime = self.time0
self.lasttime += self.nextstep
self.nextstep *= self.factor
debug.fmsg("Geometric returning", self.lasttime)
return self.lasttime
registeredclass.Registration(
'Geometric',
Schedule,
Geometric,
ordering=0.5,
params=[
parameter.PositiveFloatParameter(
'timestep', 1.0, tip='Initial timestep.'),
parameter.PositiveFloatParameter(
'factor', 2.0,
tip='Factor by which to increase the timestep on each step.')
],
tip="Increase timestep by a fixed factor on each step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/geomsched.xml'))
class SpecifiedTimes(UnconditionalSchedule):
def __init__(self, times):
self.times = times[:]
if self.times:
self.times.sort()
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.count = 0
def next(self):
if self.count == len(self.times):
raise StopIteration
t = self.times[self.count] + self.time0
self.count += 1
return t
def singleTime(self):
if len(self.times) == 1:
return self.times[0] + self.time0
return None
registeredclass.Registration(
'Specified Times',
Schedule,
SpecifiedTimes,
ordering=2,
params=[
parameter.ListOfFloatsParameter('times', [],
tip="Produce output at these times.")],
tip="Produce ouput at the specified times.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/specifiedsched.xml')
)
class EveryTime(ConditionalSchedule):
def condition(self, meshctxt, time):
return True
registeredclass.Registration(
'Every Time',
Schedule,
EveryTime,
ordering=10,
tip="Produce output at every time step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/everytime.xml'))
##################################################
class OutputSchedule(object):
def __init__(self, meshcontext):
# outputs is a set of ScheduledOutput objects. nexttimes is
# a dictionary, keyed by ScheduledOutput objects, giving the
# next time that each S.O. should be performed. When a
# S.O. has no more scheduled times, it is removed from the
# dictionary. After a set of outputs have been performed, the
# outputs to be performed at the next step are stored in
# nextoutputs.
# The order in which outputs are performed is arbitrary.
self.meshcontext = meshcontext
self.removeAll() # initializes self.outputs, etc.
def nOutputs(self):
return len(self.outputs)
def removeAll(self):
self.outputs = [] # list, so GUI can present them in creation order
## Use OrderedSets and OrderedDicts instead of sets and dicts
## so that the order of outputs is repeatable, which makes
## testing much easier.
self.nexttimes = utils.OrderedDict() # next time to perform each output
self.nextoutputs = utils.OrderedSet() # outputs to perform at next time
self.conditionalOutputs = utils.OrderedSet()
self.nexttime = None
self.finished = set()
def add(self, name, output):
output.setName(name)
self.outputs.append(output)
def replace(self, name, output, scheduletype, schedule, destination):
for i, o in enumerate(self.outputs):
if o.name() == name:
output.setSchedule(schedule, scheduletype)
output.setDestination(destination)
# If the old output was given an automatically
# generated name, the name must be updated to reflect
# changes in the output.
if isinstance(o.name(), automatic.AutomaticName):
output.setName(automatic.AutomaticName(
self.uniqueName(output.defaultName(), exclude=name)
))
else:
output.setName(name)
self.outputs[i] = output
return
raise ValueError("No such scheduled output: " + name)
def rename(self, oldname, newname):
output = self.getByName(oldname)
newname = self.uniqueName(newname, exclude=oldname)
# Reassign even if oldname==newname, since one of them may be
# an AutomaticName.
output.setName(newname)
def remove(self, outputname):
self.outputs.remove(self.getByName(outputname))
def size(self):
return len(self.outputs)
def isConditional(self):
return len(self.conditionalOutputs) > 0
def isSingleTime(self):
# Returns True if all of the outputs operate just once (or not
# at all), and they do it at the same time. Used to
# distinguish between static and quasistatic time evolutions.
times = [output.schedule.singleTime() for output in self.outputs
if output.active and output.fullyDefined()]
firsttime = None # first time found in list
for time in times:
if time is None: # singleTime indicated multiple times
                return False
            if firsttime is None:
firsttime = time
if time != firsttime:
return False
return True
def getByName(self, name):
for o in self.outputs:
if o.name() == name:
return o
raise ValueError("No such scheduled output: " + name)
def uniqueName(self, name, exclude=None):
return utils.uniqueName(name, self.names(), exclude)
def names(self):
return [o.name() for o in self.outputs]
def reset(self, time0, continuing):
# Reset all output schedules, and advance them to the first
# time after time0 (which is the earliest time in the current
# evolution).
self.finished.clear()
self.nexttimes = utils.OrderedDict()
self.nexttime = None
self.nextoutputs.clear()
self.conditionalOutputs.clear()
for output in self.outputs:
if (output.active and output.fullyDefined()):
output.schedule.reset(continuing)
output.start(self.meshcontext, time0, continuing)
if output.schedule.conditional:
self.conditionalOutputs.add(output)
else:
try:
t = roundOffCheck(output.schedule.next(), time0)
if continuing:
while t <= time0:
t = output.schedule.next()
else:
while t < time0:
t = output.schedule.next()
except StopIteration:
# The schedule has no times in it later than time0.
# Just ignore it.
pass
else:
self.nexttimes[output] = t
if self.nexttime is None or t < self.nexttime:
self.nexttime = t
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
def times(self, endtime):
if not self.nexttimes:
self.nexttime = endtime
yield endtime
while self.nexttimes:
self.nexttime = min(self.nexttimes.values()) # min over all Outputs
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
yield self.nexttime
# Update next times for the outputs that have just been
# performed. If output.schedule.next() raises
# StopIteration, it's finished.
for output in self.nextoutputs:
try:
self.nexttimes[output] = roundOffCheck(
output.schedule.next(), endtime)
except StopIteration:
del self.nexttimes[output]
self.finished.add(output)
# end while self.nexttimes
if endtime != self.nexttime:
self.nextoutputs.clear()
yield endtime
def perform(self, time):
# perform() can be called before self.nexttime if there are
# conditional outputs, so we have to check the time here.
if time == self.nexttime:
for output in self.nextoutputs:
# No need to check output.active here. Only active
# outputs are in nextoutputs.
output.perform(self.meshcontext, time)
for output in self.conditionalOutputs:
if output.schedule.condition(self.meshcontext, time):
if output.active:
output.perform(self.meshcontext, time)
def finish(self):
for output in self.finished:
output.finish(self.meshcontext)
for output in self.nexttimes:
output.finish(self.meshcontext)
for output in self.conditionalOutputs:
# Only active outputs are in nexttimes or in finished, but
# inactive ones may be in conditionalOutputs, so we have
# to check.
if output.active:
output.finish(self.meshcontext)
def saveAll(self, datafile, meshctxt):
for output in self.outputs:
output.save(datafile, meshctxt)
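def _driver_sketch(outsched, endtime):
    # A minimal sketch, not part of the original module: the loop a solver
    # driver is expected to run against an OutputSchedule -- reset to the
    # start time, step through the yielded times, perform the outputs that
    # are due at each one, then let the outputs clean up.
    outsched.reset(0.0, False)
    for t in outsched.times(endtime):
        # ... advance the solution to time t here ...
        outsched.perform(t)
    outsched.finish()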
def roundOffCheck(val, target):
if abs(val - target) < 10.*utils.machine_epsilon*target:
return target
return val | random_line_split | |
outputschedule.py | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.common import switchboard
from ooflib.SWIG.engine import ooferror2
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import utils
from ooflib.common import registeredclass
from ooflib.common.IO import automatic
from ooflib.common.IO import datafile
from ooflib.common.IO import parameter
from ooflib.common.IO import reporter
from ooflib.common.IO import xmlmenudump
import math
################
# The ScheduleType class determines how an output schedule behaves.
# This is separate from the Schedule class because some Schedules can
# work with more than one ScheduleType.
class ScheduleType(registeredclass.RegisteredClass):
registry = []
tip="How output Schedules are interpreted."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/scheduletype.xml')
class AbsoluteOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(0.0)
registeredclass.Registration(
'Absolute',
ScheduleType,
AbsoluteOutputSchedule,
ordering=0,
tip="Output schedule times are absolute.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/absolutesched.xml')
)
class RelativeOutputSchedule(ScheduleType):
conditional = False
def setOffset(self, schedule, time0):
schedule.setOffset(time0)
registeredclass.Registration(
'Relative',
ScheduleType,
RelativeOutputSchedule,
ordering=1,
tip="Output schedule times are relative to the start time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/relativesched.xml')
)
class ConditionalOutputSchedule(ScheduleType):
conditional = True
def setOffset(self, schedule, time0):
pass
registeredclass.Registration(
'Conditional',
ScheduleType,
ConditionalOutputSchedule,
ordering=2,
tip="Output schedule times are determined on the fly by a given criterion.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/reg/conditionalsched.xml')
)
######################
# The Schedule class determines when scheduled output is produced.
# UnconditionalSchedules determine the output times in advance.
# ConditionalSchedules do it on the fly.
class Schedule(registeredclass.RegisteredClass):
registry = []
tip="Ways to specify when Scheduled Output will be produced."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/schedule.xml')
def __init__(self):
self.time0 = 0.0
def reset(self, continuing):
pass
def singleTime(self):
# In subclasses this should return the time if the Schedule
# contains only a single time, or None if the Schedule
# contains multiple times. If it contains a single unknown
# time (which probably doesn't make sense) it should return
# None. This is used to distinguish between static and
# quasistatic evolutions.
return None
def setOffset(self, time0): # adjust for relative vs absolute scheduletypes
self.time0 = time0
class UnconditionalSchedule(Schedule):
conditional = False
def __iter__(self):
return self
class ConditionalSchedule(Schedule):
conditional = True
# The OutputScheduleParameter has a widget (defined in
# engine.IO.GUI.schedulewidget) that lists only the Schedules that are
# compatible with the current ScheduleType.
class OutputScheduleParameter(parameter.RegisteredParameter):
def __init__(self, name, value=None, default=None, tip=None, auxData={}):
parameter.RegisteredParameter.__init__(self, name,
reg=Schedule,
value=value, default=default,
tip=tip, auxData=auxData)
def clone(self):
return self.__class__(self.name, self.value, self.default, self.tip)
#######################
class Once(UnconditionalSchedule):
def __init__(self, time):
self.time = time
self.done = False
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.done = False
def next(self):
if self.done:
raise StopIteration
self.done = True
return self.time + self.time0
def singleTime(self):
return self.time
registeredclass.Registration(
'Once',
Schedule,
Once,
ordering=1,
params=[parameter.FloatParameter('time', 0.0, tip='The output time.')],
tip="Produce output at just one time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/once.xml'))
class Periodic(UnconditionalSchedule):
def __init__(self, delay, interval):
self.delay = delay
self.interval = interval
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
self.count = 0
def next(self):
t = self.time0 + self.delay + self.count*self.interval
self.count += 1
return t
registeredclass.Registration(
'Periodic',
Schedule,
Periodic,
ordering=0,
params=[
parameter.NonNegativeFloatParameter(
'delay', 0.0, tip='Time before the first output.'),
parameter.PositiveFloatParameter(
'interval', 0.0, tip='Time between outputs.')
],
tip="Produce evenly spaced periodic output.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/periodicsched.xml'))
class Geometric(UnconditionalSchedule):
def __init__(self, timestep, factor):
self.timestep = timestep
self.factor = factor
self.nextstep = timestep
self.lasttime = None
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.nextstep = self.timestep
self.lasttime = None
def next(self):
## TODO 3.1: This doesn't quite do the right thing on the first
## step of a continued computation, if the previous step was
## truncated to hit the end time exactly. For example, with
## factor=2 and timestep=0.1, the times are 0.1, 0.3, 0.7,
## 1.5, etc. If the end time is 1, the output times will be
## 0.1, 0.3, 0.7, and 1.0. If the solution is continued, the
## next time will be 3.1, although it probably should be 1.5.
if self.lasttime is None:
self.lasttime = self.time0
self.lasttime += self.nextstep
self.nextstep *= self.factor
debug.fmsg("Geometric returning", self.lasttime)
return self.lasttime
registeredclass.Registration(
'Geometric',
Schedule,
Geometric,
ordering=0.5,
params=[
parameter.PositiveFloatParameter(
'timestep', 1.0, tip='Initial timestep.'),
parameter.PositiveFloatParameter(
'factor', 2.0,
tip='Factor by which to increase the timestep on each step.')
],
tip="Increase timestep by a fixed factor on each step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/geomsched.xml'))
class SpecifiedTimes(UnconditionalSchedule):
def __init__(self, times):
self.times = times[:]
if self.times:
self.times.sort()
self.count = 0
UnconditionalSchedule.__init__(self)
def reset(self, continuing):
if not continuing:
self.count = 0
def next(self):
if self.count == len(self.times):
raise StopIteration
t = self.times[self.count] + self.time0
self.count += 1
return t
def singleTime(self):
if len(self.times) == 1:
return self.times[0] + self.time0
return None
registeredclass.Registration(
'Specified Times',
Schedule,
SpecifiedTimes,
ordering=2,
params=[
parameter.ListOfFloatsParameter('times', [],
tip="Produce output at these times.")],
tip="Produce ouput at the specified times.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/specifiedsched.xml')
)
class EveryTime(ConditionalSchedule):
def condition(self, meshctxt, time):
return True
registeredclass.Registration(
'Every Time',
Schedule,
EveryTime,
ordering=10,
tip="Produce output at every time step.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/everytime.xml'))
##################################################
class OutputSchedule(object):
def __init__(self, meshcontext):
# outputs is a set of ScheduledOutput objects. nexttimes is
# a dictionary, keyed by ScheduledOutput objects, giving the
# next time that each S.O. should be performed. When a
# S.O. has no more scheduled times, it is removed from the
# dictionary. After a set of outputs have been performed, the
# outputs to be performed at the next step are stored in
# nextoutputs.
# The order in which outputs are performed is arbitrary.
self.meshcontext = meshcontext
self.removeAll() # initializes self.outputs, etc.
def nOutputs(self):
return len(self.outputs)
def removeAll(self):
self.outputs = [] # list, so GUI can present them in creation order
## Use OrderedSets and OrderedDicts instead of sets and dicts
## so that the order of outputs is repeatable, which makes
## testing much easier.
self.nexttimes = utils.OrderedDict() # next time to perform each output
self.nextoutputs = utils.OrderedSet() # outputs to perform at next time
self.conditionalOutputs = utils.OrderedSet()
self.nexttime = None
self.finished = set()
def add(self, name, output):
output.setName(name)
self.outputs.append(output)
def replace(self, name, output, scheduletype, schedule, destination):
for i, o in enumerate(self.outputs):
if o.name() == name:
output.setSchedule(schedule, scheduletype)
output.setDestination(destination)
# If the old output was given an automatically
# generated name, the name must be updated to reflect
# changes in the output.
if isinstance(o.name(), automatic.AutomaticName):
output.setName(automatic.AutomaticName(
self.uniqueName(output.defaultName(), exclude=name)
))
else:
output.setName(name)
self.outputs[i] = output
return
raise ValueError("No such scheduled output: " + name)
def rename(self, oldname, newname):
output = self.getByName(oldname)
newname = self.uniqueName(newname, exclude=oldname)
# Reassign even if oldname==newname, since one of them may be
# an AutomaticName.
output.setName(newname)
def remove(self, outputname):
self.outputs.remove(self.getByName(outputname))
def size(self):
return len(self.outputs)
def isConditional(self):
return len(self.conditionalOutputs) > 0
def isSingleTime(self):
# Returns True if all of the outputs operate just once (or not
# at all), and they do it at the same time. Used to
# distinguish between static and quasistatic time evolutions.
times = [output.schedule.singleTime() for output in self.outputs
if output.active and output.fullyDefined()]
firsttime = None # first time found in list
for time in times:
            if time is None: # singleTime indicated multiple times
                return False
if firsttime is None:
firsttime = time
if time != firsttime:
return False
return True
def getByName(self, name):
for o in self.outputs:
if o.name() == name:
return o
raise ValueError("No such scheduled output: " + name)
def uniqueName(self, name, exclude=None):
return utils.uniqueName(name, self.names(), exclude)
def names(self):
return [o.name() for o in self.outputs]
def reset(self, time0, continuing):
# Reset all output schedules, and advance them to the first
# time after time0 (which is the earliest time in the current
# evolution).
self.finished.clear()
self.nexttimes = utils.OrderedDict()
self.nexttime = None
self.nextoutputs.clear()
self.conditionalOutputs.clear()
for output in self.outputs:
if (output.active and output.fullyDefined()):
output.schedule.reset(continuing)
output.start(self.meshcontext, time0, continuing)
if output.schedule.conditional:
self.conditionalOutputs.add(output)
else:
try:
t = roundOffCheck(output.schedule.next(), time0)
if continuing:
while t <= time0:
t = output.schedule.next()
else:
while t < time0:
t = output.schedule.next()
except StopIteration:
# The schedule has no times in it later than time0.
# Just ignore it.
pass
else:
self.nexttimes[output] = t
if self.nexttime is None or t < self.nexttime:
self.nexttime = t
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
def times(self, endtime):
if not self.nexttimes:
self.nexttime = endtime
yield endtime
while self.nexttimes:
self.nexttime = min(self.nexttimes.values()) # min over all Outputs
self.nextoutputs = utils.OrderedSet(
[o for (o,t) in self.nexttimes.items() if t == self.nexttime])
yield self.nexttime
# Update next times for the outputs that have just been
# performed. If output.schedule.next() raises
# StopIteration, it's finished.
for output in self.nextoutputs:
try:
self.nexttimes[output] = roundOffCheck(
output.schedule.next(), endtime)
except StopIteration:
del self.nexttimes[output]
self.finished.add(output)
# end while self.nexttimes
if endtime != self.nexttime:
self.nextoutputs.clear()
yield endtime
def perform(self, time):
# perform() can be called before self.nexttime if there are
# conditional outputs, so we have to check the time here.
if time == self.nexttime:
for output in self.nextoutputs:
# No need to check output.active here. Only active
# outputs are in nextoutputs.
output.perform(self.meshcontext, time)
for output in self.conditionalOutputs:
if output.schedule.condition(self.meshcontext, time):
if output.active:
output.perform(self.meshcontext, time)
def finish(self):
for output in self.finished:
output.finish(self.meshcontext)
for output in self.nexttimes:
output.finish(self.meshcontext)
for output in self.conditionalOutputs:
# Only active outputs are in nexttimes or in finished, but
# inactive ones may be in conditionalOutputs, so we have
# to check.
if output.active:
output.finish(self.meshcontext)
def saveAll(self, datafile, meshctxt):
for output in self.outputs:
output.save(datafile, meshctxt)
def roundOffCheck(val, target):
if abs(val - target) < 10.*utils.machine_epsilon*target:
return target
return val
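def _roundoff_demo():
    # A minimal sketch, not part of the original module: roundOffCheck snaps
    # a value back onto its target when the two differ by less than ten
    # machine epsilons, absorbing accumulated floating-point error in the
    # schedule arithmetic above.
    assert roundOffCheck(1.0 + 1e-16, 1.0) == 1.0
    assert roundOffCheck(1.1, 1.0) == 1.1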
| return False | conditional_block |
dynamic_model_python_basic.py | #-*- coding: utf-8 -*-
#
# Copyright 2013-2014 Antoine Drouin (poinix@gmail.com)
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a 6dof model for a fixed wing vehicle
"""
import math
import numpy as np
import scipy.integrate
import scipy.optimize
import matplotlib.pyplot as plt
import pat.dynamic_model as dm
import pat.frames as fr
import pat.utils as pu
import pat.algebra as pal
import pat.atmosphere as patm
"""
Components of the state vector
"""
sv_x = 0 # position x axis
sv_y = 1 # position y axis
sv_z = 2 # position z axis, positive down (height above ground is -z)
sv_v = 3 # airspeed
sv_alpha = 4 # alpha
sv_beta = 5 # beta
sv_phi = 6 # roll (euler, ltp to body)
sv_theta = 7 # pitch (euler, ltp to body)
sv_psi = 8 # yaw (euler, ltp to body)
sv_p = 9 # rotational vel body x
sv_q = 10 # rotational vel body y
sv_r = 11 # rotational vel body z
sv_size = 12
"""
Components of the input vector
"""
iv_da = 0
iv_de = 1
iv_dr = 2
iv_size = 4
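# A minimal sketch with hypothetical values, not part of the original file:
# filling a state vector for level flight at 15 m/s and 100 m altitude.
def _example_state():
    X = np.zeros(sv_size)
    X[sv_z] = -100.            # z is positive down, so 100 m altitude
    X[sv_v] = 15.
    X[sv_alpha] = math.radians(2.)
    return X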
def get_aero_to_body(X):
"""
    computes the aero-to-body rotation matrix
"""
ca = math.cos(X[sv_alpha]); sa = math.sin(X[sv_alpha])
cb = math.cos(X[sv_beta]); sb = math.sin(X[sv_beta])
return np.array([[ca*cb, -ca*sb, -sa],
[sb , cb , 0.],
[sa*cb, -sa*sb, ca]])
def get_f_eng_body(X, U, P):
"""
return propulsion forces expressed in body frame
"""
rho = patm.get_rho(-X[sv_z])
f_engines_body = np.zeros((P.eng_nb, 3))
for i in range(0, P.eng_nb):
thrust = U[i]*P.fmaxs[i]*math.pow((rho/P.rhois[i]),P.nrhos[i])*math.pow((X[sv_v]/P.Vis[i]),P.nVs[i])
f_engines_body[i] = np.dot(P.eng_to_body[i], np.array([thrust, 0., 0.]))
return f_engines_body
def get_f_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic forces expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
CL = P.CL0 + P.CL_alpha*d_alpha + P.CL_beta*X[sv_beta] +\
np.dot(P.CL_omega,rvel) + np.dot(P.CL_sfc,Usfc)
CD = P.CD0 + P.CD_k1*CL + P.CD_k2*(CL**2) + np.dot(P.CD_sfc,Usfc)
CY = P.CY_alpha*d_alpha + P.CY_beta*X[sv_beta] +\
np.dot(P.CY_omega,rvel) + np.dot(P.CY_sfc,Usfc)
return Pdyn*P.Sref*np.dot(get_aero_to_body(X),[-CD, CY, -CL])
def get_m_eng_body(f_eng_body, P):
"""
return propulsion moments expressed in body frame
"""
m = np.zeros(3)
for i in range(0, P.eng_nb):
m += np.cross(P.eng_pos[i], f_eng_body[i])
return m
def get_m_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic moments expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
Cl = P.Cl_alpha*d_alpha + P.Cl_beta*X[sv_beta] +\
np.dot(P.Cl_omega,rvel) + np.dot(P.Cl_sfc,Usfc)
Cm = P.Cm0 + P.Cm_alpha*d_alpha + P.Cm_beta*X[sv_beta] +\
np.dot(P.Cm_omega,rvel) + np.dot(P.Cm_sfc,Usfc)
Cn = P.Cn_alpha*d_alpha + P.Cn_beta*X[sv_beta] +\
np.dot(P.Cn_omega,rvel) + np.dot(P.Cn_sfc,Usfc)
return Pdyn*P.Sref*np.array([Cl*P.Bref, Cm*P.Cref, Cn*P.Bref])
def dyn(X, t, U, P):
"""
Dynamic model
"""
rho = patm.get_rho(-X[sv_z])
Pdyn = 0.5*rho*X[sv_v]**2
Ueng = U[0:P.eng_nb] # engines part of input vector
Usfc = U[P.eng_nb:P.eng_nb+P.sfc_nb] # control surfaces part of input vector
X_rvel_body = X[sv_p:sv_r+1] # body rotational velocities
X_euler = X[sv_phi:sv_psi+1] # euler angles
# Newton for forces in body frame
f_aero_body = get_f_aero_body(X, Usfc, P, Pdyn)
f_eng_body = get_f_eng_body(X, Ueng, P)
earth_to_body = pal.rmat_of_euler(X_euler)
f_weight_body = np.dot(earth_to_body, [0., 0., P.m*P.g])
forces_body = f_aero_body + np.sum(f_eng_body, axis=0) + f_weight_body
vel_body = np.dot(get_aero_to_body(X), [X[sv_v], 0., 0.]) # u, v, w
accel_body = 1./P.m*forces_body - np.cross(X_rvel_body, vel_body)
# Newton for moments in body frame
m_aero_body = get_m_aero_body(X, Usfc, P, Pdyn)
m_eng_body = get_m_eng_body(f_eng_body, P)
raccel_body = np.dot(P.invI, m_aero_body + m_eng_body - np.cross(X_rvel_body, np.dot(P.I, X_rvel_body)))
Xdot = np.zeros(sv_size)
Xdot[sv_x:sv_z+1] = np.dot(np.transpose(earth_to_body), vel_body)
Xdot[sv_v] = np.inner(vel_body, accel_body)/X[sv_v]
u, v, w = vel_body
ud, vd, wd = accel_body
Xdot[sv_alpha] = (u*wd - w*ud)/(u**2+w**2)
Xdot[sv_beta] = (X[sv_v]*vd - v*Xdot[sv_v]) / X[sv_v] / math.sqrt(u**2+w**2)
Xdot[sv_phi:sv_psi+1] = pal.euler_derivatives(X_euler, X_rvel_body)
Xdot[sv_p:sv_r+1] = raccel_body
return Xdot
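# Integration sketch (illustrative only; P must be a fully populated Param
# and U0 a plausible input vector):
#
#   X0 = np.array([0., 0., 0., 68., 0., 0., 0., 0., 0., 0., 0., 0.])
#   U0 = np.zeros(P.input_nb)
#   t = np.linspace(0., 10., 1001)
#   traj = scipy.integrate.odeint(dyn, X0, t, args=(U0, P))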
def trim(P, args=None, debug=False):
"""
Find throttle, elevator and angle of attack corresponding
to the given airspeed and flight path
"""
if args is not None:
va, gamma, h = (args['va'], args['gamma'], args['h'] )
else:
va, gamma, h = (P.Vref, 0., 0.)
if debug:
print "searching for constant path trajectory with"
print " va {:f} m/s".format(va)
print " gamma {:f} deg".format(pu.deg_of_rad(gamma))
def err_func((throttle, elevator, alpha)):
X=[0., 0., -h, va, alpha, 0., 0., gamma+alpha, 0., 0., 0., 0.]
U = np.zeros(P.input_nb)
U[0:P.eng_nb] = throttle; U[P.eng_nb+iv_de] = elevator
Xdot = dyn(X, 0., U, P)
Xdot_ref = [va*math.cos(gamma), 0., -va*math.sin(gamma), 0., 0., 0., 0., 0., 0., 0., 0., 0.]
return np.linalg.norm(Xdot - Xdot_ref)
p0 = [0.2, pu.rad_of_deg(2.), pu.rad_of_deg(0.)]
thr_e, ele_e, alpha_e = scipy.optimize.fmin_powell(err_func, p0, disp=debug, ftol=1e-9)
if debug:
print """result:
throttle : {:f} %
elevator : {:f} deg
angle of attack : {:f} deg""".format(100.*thr_e, pu.deg_of_rad(ele_e), pu.deg_of_rad(alpha_e))
Ue = np.zeros(P.input_nb)
Ue[0:P.eng_nb] = thr_e; Ue[P.eng_nb+iv_de] = ele_e
Xe = [va*math.cos(gamma), 0., va*math.sin(gamma), va, alpha_e, 0., 0., gamma+alpha_e, 0., 0., 0., 0.]
return Xe, Ue
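# Usage sketch (the config path and flight-condition values here are
# hypothetical):
#
#   P = Param("../config/Rcam_single_engine.xml")
#   Xe, Ue = trim(P, args={'va': 70., 'gamma': 0., 'h': 0.}, debug=True)
#   # At the trim point dyn(Xe, 0., Ue, P) should be close to the
#   # constant-path reference derivative used inside err_func.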
import pat.vehicles.fixed_wing.dynamic_model_python_parameters
class Param(pat.vehicles.fixed_wing.dynamic_model_python_parameters.Param):
pass
class DynamicModel(dm.DynamicModel):
sv_x = sv_x # position x axis
sv_y = sv_y # position y axis
sv_z = sv_z # height above ground
sv_v = sv_v # airspeed
sv_alpha = sv_alpha # alpha
sv_beta = sv_beta # beta
sv_phi = sv_phi # roll (euler, ltp to body)
sv_theta = sv_theta # pitch (euler, ltp to body)
sv_psi = sv_psi # yaw (euler, ltp to body)
sv_p = sv_p # rotational vel body x
sv_q = sv_q # rotational vel body y
sv_r = sv_r # rotational vel body z
sv_size = sv_size
iv_th = 0 # throttle
iv_da = 1 # aileron
iv_de = 2 # elevator
iv_dr = 3 # rudder
iv_size = 4
# hack for multiple engines
_iv_da = 0
_iv_de = 1
_iv_dr = 2
dyn = lambda self, X, t, U, P: dyn(X, t, U, self.P)
trim = lambda self, args=None, debug=False: trim(self.P, args, debug)
def __init__(self, params=None):
|
def name(self):
return "Fixed Wing Python Basic ({:s})".format(self.P.name)
def reset(self, X0=None):
if X0 is not None: self.X = X0
else: self.X = np.array([0., 0., 0., 68., 0., 0., 0., 0., 0., 0., 0., 0.])
return self.X
def run(self, dt, U):
foo, self.X = scipy.integrate.odeint(dyn, self.X, [0, dt], args=(U, self.P, ))
return self.X
def param(self):
return str(self.P)
def iv_dth(self):
if self.P.eng_nb>1: return range(0,self.P.eng_nb)
else: return 0
def iv_da(self): return self.P.eng_nb + DynamicModel._iv_da
def iv_de(self): return self.P.eng_nb + DynamicModel._iv_de
def iv_dr(self): return self.P.eng_nb + DynamicModel._iv_dr
def input_nb(self): return self.P.input_nb
def state_SixDOFfEuclidianEuler(self):
X = np.zeros(fr.SixDOFfEuclidianEuler.size)
X[fr.SixDOFfEuclidianEuler.x:fr.SixDOFfEuclidianEuler.z+1] = self.X[sv_x:sv_z+1]
X[fr.SixDOFfEuclidianEuler.phi:fr.SixDOFfEuclidianEuler.r+1] = self.X[sv_phi:sv_r+1]
return X
def get_jacobian(self, Xe, Ue):
A,B = pu.num_jacobian(Xe, Ue, self.P, dyn)
return A, B
def state_str(self):
return """pos: {:-.2f}, {:-.2f}, {:-.2f} m
vel: {:-.2f} m/s, alpha {:-.2f}, beta {:-.2f} deg
att: {:-.2f}, {:-.2f}, {:-.2f} deg
""".format(self.X[sv_x], self.X[sv_y], self.X[sv_z],
self.X[sv_v], pu.deg_of_rad(self.X[sv_alpha]), pu.deg_of_rad(self.X[sv_beta]),
pu.deg_of_rad(self.X[sv_phi]), pu.deg_of_rad(self.X[sv_theta]), pu.deg_of_rad(self.X[sv_psi]))
def plot_trajectory(self, time, X, U=None, figure=None, window_title="Trajectory", legend=None, filename=None):
plot_trajectory(time, X, U, figure, window_title, legend, filename)
#
# Some plotting functions
#
def plot_trajectory(time, X, U=None, figure=None, window_title="Trajectory",
legend=None, filename=None):
margins=(0.04, 0.05, 0.98, 0.96, 0.20, 0.34)
figure = pu.prepare_fig(figure, window_title, figsize=(20.48, 10.24), margins=margins)
plots = [("x", "m", X[:,sv_x]), ("y", "m", X[:,sv_y]), ("z", "m", X[:,sv_z]),
("v", "m/s", X[:,sv_v]),
("$\\alpha$", "deg", pu.deg_of_rad(X[:,sv_alpha])),
("$\\beta$", "deg", pu.deg_of_rad(X[:,sv_beta])),
("$\\phi$", "deg", pu.deg_of_rad(X[:,sv_phi])),
("$\\theta$", "deg", pu.deg_of_rad(X[:,sv_theta])),
("$\\psi$", "deg", pu.deg_of_rad(X[:,sv_psi])),
("$p$", "deg/s", pu.deg_of_rad(X[:,sv_p])),
("$q$", "deg/s", pu.deg_of_rad(X[:,sv_q])),
("$r$", "deg/s", pu.deg_of_rad(X[:,sv_r]))]
nrow = 5 if U is not None else 4
for i, (title, ylab, data) in enumerate(plots):
ax = plt.subplot(nrow, 3, i+1)
plt.plot(time, data)
pu.decorate(ax, title=title, ylab=ylab)
if legend is not None:
plt.legend(legend, loc='best')
if U is not None:
ax = figure.add_subplot(5, 3, 13)
ax.plot(time, 100*U[:, 0])
pu.decorate(ax, title="$d_{th}$", ylab="%")
ax = figure.add_subplot(5, 3, 14)
ax.plot(time, pu.deg_of_rad(U[:, iv_da+1]))
pu.decorate(ax, title="$d_a$", ylab="deg")
ax = figure.add_subplot(5, 3, 15)
ax.plot(time, pu.deg_of_rad(U[:, iv_de+1]))
pu.decorate(ax, title="$d_e$", ylab="deg")
return figure
| print "Info: Dynamic fixed wing basic"
dm.DynamicModel.__init__(self)
if params is None: params = "../config/Rcam_single_engine.xml"
self.X = np.zeros(DynamicModel.sv_size)
self.P = Param(params)
self.reset() | identifier_body |
dynamic_model_python_basic.py | #-*- coding: utf-8 -*-
#
# Copyright 2013-2014 Antoine Drouin (poinix@gmail.com)
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a 6dof model for a fixed wing vehicle
"""
import math
import numpy as np
import scipy.integrate
import scipy.optimize
import matplotlib.pyplot as plt
import pat.dynamic_model as dm
import pat.frames as fr
import pat.utils as pu
import pat.algebra as pal
import pat.atmosphere as patm
"""
Components of the state vector
"""
sv_x = 0 # position x axis
sv_y = 1 # position y axis
sv_z = 2 # height above ground
sv_v = 3 # airspeed
sv_alpha = 4 # alpha
sv_beta = 5 # beta
sv_phi = 6 # roll (euler, ltp to body)
sv_theta = 7 # pitch (euler, ltp to body)
sv_psi = 8 # yaw (euler, ltp to body)
sv_p = 9 # rotational vel body x
sv_q = 10 # rotational vel body y
sv_r = 11 # rotational vel body z
sv_size = 12
"""
Components of the input vector
"""
iv_da = 0
iv_de = 1
iv_dr = 2
iv_size = 4
def get_aero_to_body(X):
"""
computes the aero to body rotation matrix
"""
ca = math.cos(X[sv_alpha]); sa = math.sin(X[sv_alpha])
cb = math.cos(X[sv_beta]); sb = math.sin(X[sv_beta])
return np.array([[ca*cb, -ca*sb, -sa],
[sb , cb , 0.],
[sa*cb, -sa*sb, ca]])
def get_f_eng_body(X, U, P):
"""
return propulsion forces expressed in body frame
"""
rho = patm.get_rho(-X[sv_z])
f_engines_body = np.zeros((P.eng_nb, 3))
for i in range(0, P.eng_nb):
thrust = U[i]*P.fmaxs[i]*math.pow((rho/P.rhois[i]),P.nrhos[i])*math.pow((X[sv_v]/P.Vis[i]),P.nVs[i])
f_engines_body[i] = np.dot(P.eng_to_body[i], np.array([thrust, 0., 0.]))
return f_engines_body
def get_f_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic forces expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
CL = P.CL0 + P.CL_alpha*d_alpha + P.CL_beta*X[sv_beta] +\
np.dot(P.CL_omega,rvel) + np.dot(P.CL_sfc,Usfc)
CD = P.CD0 + P.CD_k1*CL + P.CD_k2*(CL**2) + np.dot(P.CD_sfc,Usfc)
CY = P.CY_alpha*d_alpha + P.CY_beta*X[sv_beta] +\
np.dot(P.CY_omega,rvel) + np.dot(P.CY_sfc,Usfc)
return Pdyn*P.Sref*np.dot(get_aero_to_body(X),[-CD, CY, -CL])
def get_m_eng_body(f_eng_body, P):
"""
return propulsion moments expressed in body frame
"""
m = np.zeros(3)
for i in range(0, P.eng_nb):
m += np.cross(P.eng_pos[i], f_eng_body[i])
return m
def get_m_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic moments expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
Cl = P.Cl_alpha*d_alpha + P.Cl_beta*X[sv_beta] +\
np.dot(P.Cl_omega,rvel) + np.dot(P.Cl_sfc,Usfc)
Cm = P.Cm0 + P.Cm_alpha*d_alpha + P.Cm_beta*X[sv_beta] +\
np.dot(P.Cm_omega,rvel) + np.dot(P.Cm_sfc,Usfc)
Cn = P.Cn_alpha*d_alpha + P.Cn_beta*X[sv_beta] +\
np.dot(P.Cn_omega,rvel) + np.dot(P.Cn_sfc,Usfc)
return Pdyn*P.Sref*np.array([Cl*P.Bref, Cm*P.Cref, Cn*P.Bref])
def dyn(X, t, U, P):
"""
Dynamic model
"""
rho = patm.get_rho(-X[sv_z])
Pdyn = 0.5*rho*X[sv_v]**2
Ueng = U[0:P.eng_nb] # engines part of input vector
Usfc = U[P.eng_nb:P.eng_nb+P.sfc_nb] # control surfaces part of input vector
X_rvel_body = X[sv_p:sv_r+1] # body rotational velocities
X_euler = X[sv_phi:sv_psi+1] # euler angles
# Newton for forces in body frame
f_aero_body = get_f_aero_body(X, Usfc, P, Pdyn)
f_eng_body = get_f_eng_body(X, Ueng, P)
earth_to_body = pal.rmat_of_euler(X_euler)
f_weight_body = np.dot(earth_to_body, [0., 0., P.m*P.g])
forces_body = f_aero_body + np.sum(f_eng_body, axis=0) + f_weight_body
vel_body = np.dot(get_aero_to_body(X), [X[sv_v], 0., 0.]) # u, v, w
accel_body = 1./P.m*forces_body - np.cross(X_rvel_body, vel_body)
# Newton for moments in body frame
m_aero_body = get_m_aero_body(X, Usfc, P, Pdyn)
m_eng_body = get_m_eng_body(f_eng_body, P)
raccel_body = np.dot(P.invI, m_aero_body + m_eng_body - np.cross(X_rvel_body, np.dot(P.I, X_rvel_body)))
Xdot = np.zeros(sv_size)
Xdot[sv_x:sv_z+1] = np.dot(np.transpose(earth_to_body), vel_body)
Xdot[sv_v] = np.inner(vel_body, accel_body)/X[sv_v]
u, v, w = vel_body
ud, vd, wd = accel_body
Xdot[sv_alpha] = (u*wd - w*ud)/(u**2+w**2)
Xdot[sv_beta] = (X[sv_v]*vd - v*Xdot[sv_v]) / X[sv_v] / math.sqrt(u**2+w**2)
Xdot[sv_phi:sv_psi+1] = pal.euler_derivatives(X_euler, X_rvel_body)
Xdot[sv_p:sv_r+1] = raccel_body
return Xdot
def trim(P, args=None, debug=False):
"""
Find throttle, elevator and angle of attack corresponding
to the given airspeed and flight path
"""
if args is not None:
va, gamma, h = (args['va'], args['gamma'], args['h'] )
else:
va, gamma, h = (P.Vref, 0., 0.)
if debug:
print "searching for constant path trajectory with"
print " va {:f} m/s".format(va)
print " gamma {:f} deg".format(pu.deg_of_rad(gamma))
def err_func((throttle, elevator, alpha)):
X=[0., 0., -h, va, alpha, 0., 0., gamma+alpha, 0., 0., 0., 0.]
U = np.zeros(P.input_nb)
U[0:P.eng_nb] = throttle; U[P.eng_nb+iv_de] = elevator
Xdot = dyn(X, 0., U, P)
Xdot_ref = [va*math.cos(gamma), 0., -va*math.sin(gamma), 0., 0., 0., 0., 0., 0., 0., 0., 0.]
return np.linalg.norm(Xdot - Xdot_ref)
p0 = [0.2, pu.rad_of_deg(2.), pu.rad_of_deg(0.)]
thr_e, ele_e, alpha_e = scipy.optimize.fmin_powell(err_func, p0, disp=debug, ftol=1e-9)
if debug:
print """result:
throttle : {:f} %
elevator : {:f} deg
angle of attack : {:f} deg""".format(100.*thr_e, pu.deg_of_rad(ele_e), pu.deg_of_rad(alpha_e))
Ue = np.zeros(P.input_nb)
Ue[0:P.eng_nb] = thr_e; Ue[P.eng_nb+iv_de] = ele_e
Xe = [va*math.cos(gamma), 0., va*math.sin(gamma), va, alpha_e, 0., 0., gamma+alpha_e, 0., 0., 0., 0.]
return Xe, Ue
import pat.vehicles.fixed_wing.dynamic_model_python_parameters
class Param(pat.vehicles.fixed_wing.dynamic_model_python_parameters.Param):
pass
class DynamicModel(dm.DynamicModel):
sv_x = sv_x # position x axis
sv_y = sv_y # position y axis
sv_z = sv_z # height above ground
sv_v = sv_v # airspeed
sv_alpha = sv_alpha # alpha
sv_beta = sv_beta # beta
sv_phi = sv_phi # roll (euler, ltp to body)
sv_theta = sv_theta # pitch (euler, ltp to body)
sv_psi = sv_psi # yaw (euler, ltp to body)
sv_p = sv_p # rotational vel body x
sv_q = sv_q # rotational vel body y
sv_r = sv_r # rotational vel body z
sv_size = sv_size
iv_th = 0 # throttle
iv_da = 1 # aileron
iv_de = 2 # elevator
iv_dr = 3 # rudder
iv_size = 4
# hack for multiple engines
_iv_da = 0
_iv_de = 1
_iv_dr = 2
dyn = lambda self, X, t, U, P: dyn(X, t, U, self.P)
trim = lambda self, args=None, debug=False: trim(self.P, args, debug)
def __init__(self, params=None):
print "Info: Dynamic fixed wing basic"
dm.DynamicModel.__init__(self)
if params is None: params = "../config/Rcam_single_engine.xml"
self.X = np.zeros(DynamicModel.sv_size)
self.P = Param(params)
self.reset()
def name(self):
return "Fixed Wing Python Basic ({:s})".format(self.P.name)
def reset(self, X0=None):
if X0 is not None: self.X = X0
else: self.X = np.array([0., 0., 0., 68., 0., 0., 0., 0., 0., 0., 0., 0.])
return self.X
def run(self, dt, U):
foo, self.X = scipy.integrate.odeint(dyn, self.X, [0, dt], args=(U, self.P, ))
return self.X
def param(self):
return str(self.P)
def iv_dth(self):
if self.P.eng_nb>1: return range(0,self.P.eng_nb)
else: return 0
def iv_da(self): return self.P.eng_nb + DynamicModel._iv_da
def iv_de(self): return self.P.eng_nb + DynamicModel._iv_de
def iv_dr(self): return self.P.eng_nb + DynamicModel._iv_dr
def input_nb(self): return self.P.input_nb
def state_SixDOFfEuclidianEuler(self):
X = np.zeros(fr.SixDOFfEuclidianEuler.size)
X[fr.SixDOFfEuclidianEuler.x:fr.SixDOFfEuclidianEuler.z+1] = self.X[sv_x:sv_z+1]
X[fr.SixDOFfEuclidianEuler.phi:fr.SixDOFfEuclidianEuler.r+1] = self.X[sv_phi:sv_r+1]
return X
def get_jacobian(self, Xe, Ue):
A,B = pu.num_jacobian(Xe, Ue, self.P, dyn)
return A, B
def state_str(self):
return """pos: {:-.2f}, {:-.2f}, {:-.2f} m
vel: {:-.2f} m/s, alpha {:-.2f}, beta {:-.2f} deg
att: {:-.2f}, {:-.2f}, {:-.2f} deg
""".format(self.X[sv_x], self.X[sv_y], self.X[sv_z],
self.X[sv_v], pu.deg_of_rad(self.X[sv_alpha]), pu.deg_of_rad(self.X[sv_beta]),
pu.deg_of_rad(self.X[sv_phi]), pu.deg_of_rad(self.X[sv_theta]), pu.deg_of_rad(self.X[sv_psi]))
def plot_trajectory(self, time, X, U=None, figure=None, window_title="Trajectory", legend=None, filename=None):
plot_trajectory(time, X, U, figure, window_title, legend, filename)
#
# Some plotting functions
#
def plot_trajectory(time, X, U=None, figure=None, window_title="Trajectory",
legend=None, filename=None):
margins=(0.04, 0.05, 0.98, 0.96, 0.20, 0.34)
figure = pu.prepare_fig(figure, window_title, figsize=(20.48, 10.24), margins=margins)
plots = [("x", "m", X[:,sv_x]), ("y", "m", X[:,sv_y]), ("z", "m", X[:,sv_z]),
("v", "m/s", X[:,sv_v]),
("$\\alpha$", "deg", pu.deg_of_rad(X[:,sv_alpha])),
("$\\beta$", "deg", pu.deg_of_rad(X[:,sv_beta])),
("$\\phi$", "deg", pu.deg_of_rad(X[:,sv_phi])),
("$\\theta$", "deg", pu.deg_of_rad(X[:,sv_theta])),
("$\\psi$", "deg", pu.deg_of_rad(X[:,sv_psi])),
("$p$", "deg/s", pu.deg_of_rad(X[:,sv_p])),
("$q$", "deg/s", pu.deg_of_rad(X[:,sv_q])),
("$r$", "deg/s", pu.deg_of_rad(X[:,sv_r]))]
nrow = 5 if U is not None else 4
for i, (title, ylab, data) in enumerate(plots):
|
if legend is not None:
plt.legend(legend, loc='best')
if U is not None:
ax = figure.add_subplot(5, 3, 13)
ax.plot(time, 100*U[:, 0])
pu.decorate(ax, title="$d_{th}$", ylab="%")
ax = figure.add_subplot(5, 3, 14)
ax.plot(time, pu.deg_of_rad(U[:, iv_da+1]))
pu.decorate(ax, title="$d_a$", ylab="deg")
ax = figure.add_subplot(5, 3, 15)
ax.plot(time, pu.deg_of_rad(U[:, iv_de+1]))
pu.decorate(ax, title="$d_e$", ylab="deg")
return figure
| ax = plt.subplot(nrow, 3, i+1)
plt.plot(time, data)
pu.decorate(ax, title=title, ylab=ylab) | conditional_block |
dynamic_model_python_basic.py | #-*- coding: utf-8 -*-
#
# Copyright 2013-2014 Antoine Drouin (poinix@gmail.com)
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a 6dof model for a fixed wing vehicle
"""
import math
import numpy as np
import scipy.integrate
import scipy.optimize
import matplotlib.pyplot as plt
import pat.dynamic_model as dm
import pat.frames as fr
import pat.utils as pu
import pat.algebra as pal
import pat.atmosphere as patm
"""
Components of the state vector
"""
sv_x = 0 # position x axis
sv_y = 1 # position y axis
sv_z = 2 # height above ground
sv_v = 3 # airspeed
sv_alpha = 4 # alpha
sv_beta = 5 # beta
sv_phi = 6 # roll (euler, ltp to body)
sv_theta = 7 # pitch (euler, ltp to body)
sv_psi = 8 # yaw (euler, ltp to body)
sv_p = 9 # rotational vel body x
sv_q = 10 # rotational vel body y
sv_r = 11 # rotational vel body z
sv_size = 12
"""
Components of the input vector
"""
iv_da = 0
iv_de = 1
iv_dr = 2
iv_size = 4
def get_aero_to_body(X):
"""
computes the aero to body rotation matrix
"""
ca = math.cos(X[sv_alpha]); sa = math.sin(X[sv_alpha])
cb = math.cos(X[sv_beta]); sb = math.sin(X[sv_beta])
return np.array([[ca*cb, -ca*sb, -sa],
[sb , cb , 0.],
[sa*cb, -sa*sb, ca]])
def get_f_eng_body(X, U, P):
"""
return propulsion forces expressed in body frame
"""
rho = patm.get_rho(-X[sv_z])
f_engines_body = np.zeros((P.eng_nb, 3))
for i in range(0, P.eng_nb):
thrust = U[i]*P.fmaxs[i]*math.pow((rho/P.rhois[i]),P.nrhos[i])*math.pow((X[sv_v]/P.Vis[i]),P.nVs[i])
f_engines_body[i] = np.dot(P.eng_to_body[i], np.array([thrust, 0., 0.]))
return f_engines_body
def get_f_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic forces expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
CL = P.CL0 + P.CL_alpha*d_alpha + P.CL_beta*X[sv_beta] +\
np.dot(P.CL_omega,rvel) + np.dot(P.CL_sfc,Usfc)
CD = P.CD0 + P.CD_k1*CL + P.CD_k2*(CL**2) + np.dot(P.CD_sfc,Usfc)
CY = P.CY_alpha*d_alpha + P.CY_beta*X[sv_beta] +\
np.dot(P.CY_omega,rvel) + np.dot(P.CY_sfc,Usfc)
return Pdyn*P.Sref*np.dot(get_aero_to_body(X),[-CD, CY, -CL])
def get_m_eng_body(f_eng_body, P):
"""
return propulsion moments expressed in body frame
"""
m = np.zeros(3)
for i in range(0, P.eng_nb):
m += np.cross(P.eng_pos[i], f_eng_body[i])
return m
def get_m_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic moments expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
Cl = P.Cl_alpha*d_alpha + P.Cl_beta*X[sv_beta] +\
np.dot(P.Cl_omega,rvel) + np.dot(P.Cl_sfc,Usfc)
Cm = P.Cm0 + P.Cm_alpha*d_alpha + P.Cm_beta*X[sv_beta] +\
np.dot(P.Cm_omega,rvel) + np.dot(P.Cm_sfc,Usfc)
Cn = P.Cn_alpha*d_alpha + P.Cn_beta*X[sv_beta] +\
np.dot(P.Cn_omega,rvel) + np.dot(P.Cn_sfc,Usfc)
return Pdyn*P.Sref*np.array([Cl*P.Bref, Cm*P.Cref, Cn*P.Bref])
def dyn(X, t, U, P):
"""
Dynamic model
"""
rho = patm.get_rho(-X[sv_z])
Pdyn = 0.5*rho*X[sv_v]**2
Ueng = U[0:P.eng_nb] # engines part of input vector
Usfc = U[P.eng_nb:P.eng_nb+P.sfc_nb] # control surfaces part of input vector
X_rvel_body = X[sv_p:sv_r+1] # body rotational velocities
X_euler = X[sv_phi:sv_psi+1] # euler angles
# Newton for forces in body frame
f_aero_body = get_f_aero_body(X, Usfc, P, Pdyn)
f_eng_body = get_f_eng_body(X, Ueng, P)
earth_to_body = pal.rmat_of_euler(X_euler)
f_weight_body = np.dot(earth_to_body, [0., 0., P.m*P.g])
forces_body = f_aero_body + np.sum(f_eng_body, axis=0) + f_weight_body
vel_body = np.dot(get_aero_to_body(X), [X[sv_v], 0., 0.]) # u, v, w
accel_body = 1./P.m*forces_body - np.cross(X_rvel_body, vel_body)
# Newton for moments in body frame
m_aero_body = get_m_aero_body(X, Usfc, P, Pdyn)
m_eng_body = get_m_eng_body(f_eng_body, P)
raccel_body = np.dot(P.invI, m_aero_body + m_eng_body - np.cross(X_rvel_body, np.dot(P.I, X_rvel_body)))
Xdot = np.zeros(sv_size)
Xdot[sv_x:sv_z+1] = np.dot(np.transpose(earth_to_body), vel_body)
Xdot[sv_v] = np.inner(vel_body, accel_body)/X[sv_v]
u, v, w = vel_body
ud, vd, wd = accel_body
Xdot[sv_alpha] = (u*wd - w*ud)/(u**2+w**2)
Xdot[sv_beta] = (X[sv_v]*vd - v*Xdot[sv_v]) / X[sv_v] / math.sqrt(u**2+w**2)
Xdot[sv_phi:sv_psi+1] = pal.euler_derivatives(X_euler, X_rvel_body)
Xdot[sv_p:sv_r+1] = raccel_body
return Xdot
def trim(P, args=None, debug=False):
"""
Find throttle, elevator and angle of attack corresponding
to the given airspeed and flight path
"""
if args is not None:
va, gamma, h = (args['va'], args['gamma'], args['h'] )
else:
va, gamma, h = (P.Vref, 0., 0.)
if debug:
print "searching for constant path trajectory with"
print " va {:f} m/s".format(va)
print " gamma {:f} deg".format(pu.deg_of_rad(gamma))
def err_func((throttle, elevator, alpha)):
X=[0., 0., -h, va, alpha, 0., 0., gamma+alpha, 0., 0., 0., 0.]
U = np.zeros(P.input_nb)
U[0:P.eng_nb] = throttle; U[P.eng_nb+iv_de] = elevator
Xdot = dyn(X, 0., U, P)
Xdot_ref = [va*math.cos(gamma), 0., -va*math.sin(gamma), 0., 0., 0., 0., 0., 0., 0., 0., 0.]
return np.linalg.norm(Xdot - Xdot_ref)
p0 = [0.2, pu.rad_of_deg(2.), pu.rad_of_deg(0.)]
thr_e, ele_e, alpha_e = scipy.optimize.fmin_powell(err_func, p0, disp=debug, ftol=1e-9)
if debug:
print """result:
throttle : {:f} %
elevator : {:f} deg
angle of attack : {:f} deg""".format(100.*thr_e, pu.deg_of_rad(ele_e), pu.deg_of_rad(alpha_e))
Ue = np.zeros(P.input_nb)
Ue[0:P.eng_nb] = thr_e; Ue[P.eng_nb+iv_de] = ele_e
Xe = [va*math.cos(gamma), 0., va*math.sin(gamma), va, alpha_e, 0., 0., gamma+alpha_e, 0., 0., 0., 0.]
return Xe, Ue
import pat.vehicles.fixed_wing.dynamic_model_python_parameters
class Param(pat.vehicles.fixed_wing.dynamic_model_python_parameters.Param):
pass
class DynamicModel(dm.DynamicModel):
sv_x = sv_x # position x axis
sv_y = sv_y # position y axis
sv_z = sv_z # height above ground
sv_v = sv_v # airspeed
sv_alpha = sv_alpha # alpha
sv_beta = sv_beta # beta
sv_phi = sv_phi # roll (euler, ltp to body)
sv_theta = sv_theta # pitch (euler, ltp to body)
sv_psi = sv_psi # yaw (euler, ltp to body)
sv_p = sv_p # rotational vel body x
sv_q = sv_q # rotational vel body y
sv_r = sv_r # rotational vel body z
sv_size = sv_size
iv_th = 0 # throttle
iv_da = 1 # aileron
iv_de = 2 # elevator
iv_dr = 3 # rudder
iv_size = 4
# hack for multiple engines
_iv_da = 0
_iv_de = 1
_iv_dr = 2
dyn = lambda self, X, t, U, P: dyn(X, t, U, self.P)
trim = lambda self, args=None, debug=False: trim(self.P, args, debug)
def __init__(self, params=None):
print "Info: Dynamic fixed wing basic"
dm.DynamicModel.__init__(self)
if params is None: params = "../config/Rcam_single_engine.xml"
self.X = np.zeros(DynamicModel.sv_size)
self.P = Param(params)
self.reset()
def name(self):
return "Fixed Wing Python Basic ({:s})".format(self.P.name)
def reset(self, X0=None):
if X0 is not None: self.X = X0
else: self.X = np.array([0., 0., 0., 68., 0., 0., 0., 0., 0., 0., 0., 0.])
return self.X
def run(self, dt, U):
foo, self.X = scipy.integrate.odeint(dyn, self.X, [0, dt], args=(U, self.P, ))
return self.X
def param(self):
return str(self.P)
def iv_dth(self):
if self.P.eng_nb>1: return range(0,self.P.eng_nb)
else: return 0
def iv_da(self): return self.P.eng_nb + DynamicModel._iv_da
def iv_de(self): return self.P.eng_nb + DynamicModel._iv_de
def iv_dr(self): return self.P.eng_nb + DynamicModel._iv_dr
def input_nb(self): return self.P.input_nb
def state_SixDOFfEuclidianEuler(self):
X = np.zeros(fr.SixDOFfEuclidianEuler.size)
X[fr.SixDOFfEuclidianEuler.x:fr.SixDOFfEuclidianEuler.z+1] = self.X[sv_x:sv_z+1]
X[fr.SixDOFfEuclidianEuler.phi:fr.SixDOFfEuclidianEuler.r+1] = self.X[sv_phi:sv_r+1]
return X
def | (self, Xe, Ue):
A,B = pu.num_jacobian(Xe, Ue, self.P, dyn)
return A, B
def state_str(self):
return """pos: {:-.2f}, {:-.2f}, {:-.2f} m
vel: {:-.2f} m/s, alpha {:-.2f}, beta {:-.2f} deg
att: {:-.2f}, {:-.2f}, {:-.2f} deg
""".format(self.X[sv_x], self.X[sv_y], self.X[sv_z],
self.X[sv_v], pu.deg_of_rad(self.X[sv_alpha]), pu.deg_of_rad(self.X[sv_beta]),
pu.deg_of_rad(self.X[sv_phi]), pu.deg_of_rad(self.X[sv_theta]), pu.deg_of_rad(self.X[sv_psi]))
def plot_trajectory(self, time, X, U=None, figure=None, window_title="Trajectory", legend=None, filename=None):
plot_trajectory(time, X, U, figure, window_title, legend, filename)
#
# Some plotting functions
#
def plot_trajectory(time, X, U=None, figure=None, window_title="Trajectory",
legend=None, filename=None):
margins=(0.04, 0.05, 0.98, 0.96, 0.20, 0.34)
figure = pu.prepare_fig(figure, window_title, figsize=(20.48, 10.24), margins=margins)
plots = [("x", "m", X[:,sv_x]), ("y", "m", X[:,sv_y]), ("z", "m", X[:,sv_z]),
("v", "m/s", X[:,sv_v]),
("$\\alpha$", "deg", pu.deg_of_rad(X[:,sv_alpha])),
("$\\beta$", "deg", pu.deg_of_rad(X[:,sv_beta])),
("$\\phi$", "deg", pu.deg_of_rad(X[:,sv_phi])),
("$\\theta$", "deg", pu.deg_of_rad(X[:,sv_theta])),
("$\\psi$", "deg", pu.deg_of_rad(X[:,sv_psi])),
("$p$", "deg/s", pu.deg_of_rad(X[:,sv_p])),
("$q$", "deg/s", pu.deg_of_rad(X[:,sv_q])),
("$r$", "deg/s", pu.deg_of_rad(X[:,sv_r]))]
nrow = 5 if U is not None else 4
for i, (title, ylab, data) in enumerate(plots):
ax = plt.subplot(nrow, 3, i+1)
plt.plot(time, data)
pu.decorate(ax, title=title, ylab=ylab)
if legend is not None:
plt.legend(legend, loc='best')
if U is not None:
ax = figure.add_subplot(5, 3, 13)
ax.plot(time, 100*U[:, 0])
pu.decorate(ax, title="$d_{th}$", ylab="%")
ax = figure.add_subplot(5, 3, 14)
ax.plot(time, pu.deg_of_rad(U[:, iv_da+1]))
pu.decorate(ax, title="$d_a$", ylab="deg")
ax = figure.add_subplot(5, 3, 15)
ax.plot(time, pu.deg_of_rad(U[:, iv_de+1]))
pu.decorate(ax, title="$d_e$", ylab="deg")
return figure
| get_jacobian | identifier_name |
dynamic_model_python_basic.py | #-*- coding: utf-8 -*-
#
# Copyright 2013-2014 Antoine Drouin (poinix@gmail.com)
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a 6dof model for a fixed wing vehicle
"""
import math
import numpy as np
import scipy.integrate
import scipy.optimize
import matplotlib.pyplot as plt
import pat.dynamic_model as dm
import pat.frames as fr
import pat.utils as pu
import pat.algebra as pal
import pat.atmosphere as patm
"""
Components of the state vector
"""
sv_x = 0 # position x axis
sv_y = 1 # position y axis
sv_z = 2 # height above ground
sv_v = 3 # airspeed
sv_alpha = 4 # alpha
sv_beta = 5 # beta
sv_phi = 6 # roll (euler, ltp to body)
sv_theta = 7 # pitch (euler, ltp to body)
sv_psi = 8 # yaw (euler, ltp to body)
sv_p = 9 # rotational vel body x
sv_q = 10 # rotational vel body y
sv_r = 11 # rotational vel body z
sv_size = 12
"""
Components of the input vector
"""
iv_da = 0
iv_de = 1
iv_dr = 2
iv_size = 4
def get_aero_to_body(X):
"""
computes the aero to body rotation matrix
"""
ca = math.cos(X[sv_alpha]); sa = math.sin(X[sv_alpha])
cb = math.cos(X[sv_beta]); sb = math.sin(X[sv_beta])
return np.array([[ca*cb, -ca*sb, -sa],
[sb , cb , 0.],
[sa*cb, -sa*sb, ca]])
def get_f_eng_body(X, U, P):
"""
return propulsion forces expressed in body frame
"""
rho = patm.get_rho(-X[sv_z])
f_engines_body = np.zeros((P.eng_nb, 3))
for i in range(0, P.eng_nb):
thrust = U[i]*P.fmaxs[i]*math.pow((rho/P.rhois[i]),P.nrhos[i])*math.pow((X[sv_v]/P.Vis[i]),P.nVs[i])
f_engines_body[i] = np.dot(P.eng_to_body[i], np.array([thrust, 0., 0.]))
return f_engines_body
def get_f_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic forces expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
CL = P.CL0 + P.CL_alpha*d_alpha + P.CL_beta*X[sv_beta] +\
np.dot(P.CL_omega,rvel) + np.dot(P.CL_sfc,Usfc)
CD = P.CD0 + P.CD_k1*CL + P.CD_k2*(CL**2) + np.dot(P.CD_sfc,Usfc)
CY = P.CY_alpha*d_alpha + P.CY_beta*X[sv_beta] +\
np.dot(P.CY_omega,rvel) + np.dot(P.CY_sfc,Usfc)
return Pdyn*P.Sref*np.dot(get_aero_to_body(X),[-CD, CY, -CL])
def get_m_eng_body(f_eng_body, P):
"""
return propulsion moments expressed in body frame
"""
m = np.zeros(3)
for i in range(0, P.eng_nb):
m += np.cross(P.eng_pos[i], f_eng_body[i])
return m
def get_m_aero_body(X, Usfc, P, Pdyn):
"""
return aerodynamic moments expressed in body frame
"""
d_alpha = X[sv_alpha] - P.alpha0
rvel = X[sv_p:sv_r+1]*np.array([P.Bref, P.Cref, P.Bref])/2/P.Vref
Cl = P.Cl_alpha*d_alpha + P.Cl_beta*X[sv_beta] +\
np.dot(P.Cl_omega,rvel) + np.dot(P.Cl_sfc,Usfc)
Cm = P.Cm0 + P.Cm_alpha*d_alpha + P.Cm_beta*X[sv_beta] +\
np.dot(P.Cm_omega,rvel) + np.dot(P.Cm_sfc,Usfc)
Cn = P.Cn_alpha*d_alpha + P.Cn_beta*X[sv_beta] +\
np.dot(P.Cn_omega,rvel) + np.dot(P.Cn_sfc,Usfc)
return Pdyn*P.Sref*np.array([Cl*P.Bref, Cm*P.Cref, Cn*P.Bref])
def dyn(X, t, U, P):
"""
Dynamic model
"""
rho = patm.get_rho(-X[sv_z])
Pdyn = 0.5*rho*X[sv_v]**2
Ueng = U[0:P.eng_nb] # engines part of input vector
Usfc = U[P.eng_nb:P.eng_nb+P.sfc_nb] # control surfaces part of input vector
X_rvel_body = X[sv_p:sv_r+1] # body rotational velocities
X_euler = X[sv_phi:sv_psi+1] # euler angles
# Newton for forces in body frame
f_aero_body = get_f_aero_body(X, Usfc, P, Pdyn)
f_eng_body = get_f_eng_body(X, Ueng, P)
earth_to_body = pal.rmat_of_euler(X_euler)
f_weight_body = np.dot(earth_to_body, [0., 0., P.m*P.g])
forces_body = f_aero_body + np.sum(f_eng_body, axis=0) + f_weight_body
vel_body = np.dot(get_aero_to_body(X), [X[sv_v], 0., 0.]) # u, v, w
accel_body = 1./P.m*forces_body - np.cross(X_rvel_body, vel_body)
# Newton for moments in body frame
m_aero_body = get_m_aero_body(X, Usfc, P, Pdyn)
m_eng_body = get_m_eng_body(f_eng_body, P)
raccel_body = np.dot(P.invI, m_aero_body + m_eng_body - np.cross(X_rvel_body, np.dot(P.I, X_rvel_body)))
Xdot = np.zeros(sv_size)
Xdot[sv_x:sv_z+1] = np.dot(np.transpose(earth_to_body), vel_body)
Xdot[sv_v] = np.inner(vel_body, accel_body)/X[sv_v]
u, v, w = vel_body
ud, vd, wd = accel_body
Xdot[sv_alpha] = (u*wd - w*ud)/(u**2+w**2)
Xdot[sv_beta] = (X[sv_v]*vd - v*Xdot[sv_v]) / X[sv_v] / math.sqrt(u**2+w**2)
Xdot[sv_phi:sv_psi+1] = pal.euler_derivatives(X_euler, X_rvel_body)
Xdot[sv_p:sv_r+1] = raccel_body
return Xdot
def trim(P, args=None, debug=False):
"""
Find throttle, elevator and angle of attack corresponding
to the given airspeed and flight path
"""
if args is not None:
va, gamma, h = (args['va'], args['gamma'], args['h'] )
else:
va, gamma, h = (P.Vref, 0., 0.)
if debug:
print "searching for constant path trajectory with"
print " va {:f} m/s".format(va)
print " gamma {:f} deg".format(pu.deg_of_rad(gamma)) | Xdot = dyn(X, 0., U, P)
Xdot_ref = [va*math.cos(gamma), 0., -va*math.sin(gamma), 0., 0., 0., 0., 0., 0., 0., 0., 0.]
return np.linalg.norm(Xdot - Xdot_ref)
p0 = [0.2, pu.rad_of_deg(2.), pu.rad_of_deg(0.)]
thr_e, ele_e, alpha_e = scipy.optimize.fmin_powell(err_func, p0, disp=debug, ftol=1e-9)
if debug:
print """result:
throttle : {:f} %
elevator : {:f} deg
angle of attack : {:f} deg""".format(100.*thr_e, pu.deg_of_rad(ele_e), pu.deg_of_rad(alpha_e))
Ue = np.zeros(P.input_nb)
Ue[0:P.eng_nb] = thr_e; Ue[P.eng_nb+iv_de] = ele_e
Xe = [va*math.cos(gamma), 0., va*math.sin(gamma), va, alpha_e, 0., 0., gamma+alpha_e, 0., 0., 0., 0.]
return Xe, Ue
import pat.vehicles.fixed_wing.dynamic_model_python_parameters
class Param(pat.vehicles.fixed_wing.dynamic_model_python_parameters.Param):
pass
class DynamicModel(dm.DynamicModel):
sv_x = sv_x # position x axis
sv_y = sv_y # position y axis
sv_z = sv_z # height above ground
sv_v = sv_v # airspeed
sv_alpha = sv_alpha # alpha
sv_beta = sv_beta # beta
sv_phi = sv_phi # roll (euler, ltp to body)
sv_theta = sv_theta # pitch (euler, ltp to body)
sv_psi = sv_psi # yaw (euler, ltp to body)
sv_p = sv_p # rotational vel body x
sv_q = sv_q # rotational vel body y
sv_r = sv_r # rotational vel body z
sv_size = sv_size
iv_th = 0 # throttle
iv_da = 1 # aileron
iv_de = 2 # elevator
iv_dr = 3 # rudder
iv_size = 4
# hack for multiple engines
_iv_da = 0
_iv_de = 1
_iv_dr = 2
dyn = lambda self, X, t, U, P: dyn(X, t, U, self.P)
trim = lambda self, args=None, debug=False: trim(self.P, args, debug)
def __init__(self, params=None):
print "Info: Dynamic fixed wing basic"
dm.DynamicModel.__init__(self)
if params is None: params = "../config/Rcam_single_engine.xml"
self.X = np.zeros(DynamicModel.sv_size)
self.P = Param(params)
self.reset()
def name(self):
return "Fixed Wing Python Basic ({:s})".format(self.P.name)
def reset(self, X0=None):
if X0 is not None: self.X = X0
else: self.X = np.array([0., 0., 0., 68., 0., 0., 0., 0., 0., 0., 0., 0.])
return self.X
def run(self, dt, U):
foo, self.X = scipy.integrate.odeint(dyn, self.X, [0, dt], args=(U, self.P, ))
return self.X
def param(self):
return str(self.P)
def iv_dth(self):
if self.P.eng_nb>1: return range(0,self.P.eng_nb)
else: return 0
def iv_da(self): return self.P.eng_nb + DynamicModel._iv_da
def iv_de(self): return self.P.eng_nb + DynamicModel._iv_de
def iv_dr(self): return self.P.eng_nb + DynamicModel._iv_dr
def input_nb(self): return self.P.input_nb
def state_SixDOFfEuclidianEuler(self):
X = np.zeros(fr.SixDOFfEuclidianEuler.size)
X[fr.SixDOFfEuclidianEuler.x:fr.SixDOFfEuclidianEuler.z+1] = self.X[sv_x:sv_z+1]
X[fr.SixDOFfEuclidianEuler.phi:fr.SixDOFfEuclidianEuler.r+1] = self.X[sv_phi:sv_r+1]
return X
def get_jacobian(self, Xe, Ue):
A,B = pu.num_jacobian(Xe, Ue, self.P, dyn)
return A, B
def state_str(self):
return """pos: {:-.2f}, {:-.2f}, {:-.2f} m
vel: {:-.2f} m/s, alpha {:-.2f}, beta {:-.2f} deg
att: {:-.2f}, {:-.2f}, {:-.2f} deg
""".format(self.X[sv_x], self.X[sv_y], self.X[sv_z],
self.X[sv_v], pu.deg_of_rad(self.X[sv_alpha]), pu.deg_of_rad(self.X[sv_beta]),
pu.deg_of_rad(self.X[sv_phi]), pu.deg_of_rad(self.X[sv_theta]), pu.deg_of_rad(self.X[sv_psi]))
def plot_trajectory(self, time, X, U=None, figure=None, window_title="Trajectory", legend=None, filename=None):
plot_trajectory(time, X, U, figure, window_title, legend, filename)
#
# Some plotting functions
#
def plot_trajectory(time, X, U=None, figure=None, window_title="Trajectory",
legend=None, filename=None):
margins=(0.04, 0.05, 0.98, 0.96, 0.20, 0.34)
figure = pu.prepare_fig(figure, window_title, figsize=(20.48, 10.24), margins=margins)
plots = [("x", "m", X[:,sv_x]), ("y", "m", X[:,sv_y]), ("z", "m", X[:,sv_z]),
("v", "m/s", X[:,sv_v]),
("$\\alpha$", "deg", pu.deg_of_rad(X[:,sv_alpha])),
("$\\beta$", "deg", pu.deg_of_rad(X[:,sv_beta])),
("$\\phi$", "deg", pu.deg_of_rad(X[:,sv_phi])),
("$\\theta$", "deg", pu.deg_of_rad(X[:,sv_theta])),
("$\\psi$", "deg", pu.deg_of_rad(X[:,sv_psi])),
("$p$", "deg/s", pu.deg_of_rad(X[:,sv_p])),
("$q$", "deg/s", pu.deg_of_rad(X[:,sv_q])),
("$r$", "deg/s", pu.deg_of_rad(X[:,sv_r]))]
nrow = 5 if U is not None else 4
for i, (title, ylab, data) in enumerate(plots):
ax = plt.subplot(nrow, 3, i+1)
plt.plot(time, data)
pu.decorate(ax, title=title, ylab=ylab)
if legend is not None:
plt.legend(legend, loc='best')
if U is not None:
ax = figure.add_subplot(5, 3, 13)
ax.plot(time, 100*U[:, 0])
pu.decorate(ax, title="$d_{th}$", ylab="%")
ax = figure.add_subplot(5, 3, 14)
ax.plot(time, pu.deg_of_rad(U[:, iv_da+1]))
pu.decorate(ax, title="$d_a$", ylab="deg")
ax = figure.add_subplot(5, 3, 15)
ax.plot(time, pu.deg_of_rad(U[:, iv_de+1]))
pu.decorate(ax, title="$d_e$", ylab="deg")
return figure |
def err_func((throttle, elevator, alpha)):
X=[0., 0., -h, va, alpha, 0., 0., gamma+alpha, 0., 0., 0., 0.]
U = np.zeros(P.input_nb)
U[0:P.eng_nb] = throttle; U[P.eng_nb+iv_de] = elevator | random_line_split |
ibazel_test.go | // Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibazel
import (
"bytes"
"fmt"
"os"
"reflect"
"runtime"
"runtime/debug"
"syscall"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/bazelbuild/bazel-watcher/internal/bazel"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/command"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/fswatcher/common"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/log"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/workspace"
mock_bazel "github.com/bazelbuild/bazel-watcher/internal/bazel/testing"
analysispb "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/analysis"
blaze_query "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/blaze_query"
)
type fakeFSNotifyWatcher struct {
ErrorChan chan error
EventChan chan common.Event
}
var _ common.Watcher = &fakeFSNotifyWatcher{}
func (w *fakeFSNotifyWatcher) Close() error { return nil }
func (w *fakeFSNotifyWatcher) UpdateAll(names []string) error { return nil }
func (w *fakeFSNotifyWatcher) Events() chan common.Event { return w.EventChan }
var oldCommandDefaultCommand = command.DefaultCommand
func assertEqual(t *testing.T, want, got interface{}, msg string) {
if !reflect.DeepEqual(want, got) {
t.Errorf("Wanted %s, got %s. %s", want, got, msg)
debug.PrintStack()
}
}
func assertOsExited(t *testing.T, osExitChan chan int) {
select {
case exitCode := <-osExitChan:
assertEqual(t, 3, exitCode, "Should have exited ibazel")
case <-time.After(time.Second):
t.Errorf("It should have os.Exit'd")
debug.PrintStack()
}
}
func assertNotOsExited(t *testing.T, osExitChan chan int) {
select {
case <-osExitChan:
t.Errorf("It shouldn't have os.Exit'd")
debug.PrintStack()
case <-time.After(time.Second):
// works as expected
}
}
type mockCommand struct {
startupArgs []string
bazelArgs []string
target string
args []string
notifiedOfChanges bool
started bool
terminated bool
signalChan chan syscall.Signal
doTermChan chan struct{}
didTermChan chan struct{}
}
func (m *mockCommand) Start() (*bytes.Buffer, error) {
if m.started {
panic("Can't run command twice")
}
m.started = true
return nil, nil
}
func (m *mockCommand) NotifyOfChanges() *bytes.Buffer {
m.notifiedOfChanges = true
return nil
}
func (m *mockCommand) Terminate() {
if !m.started |
m.signalChan <- syscall.SIGTERM
<-m.doTermChan
m.terminated = true
m.didTermChan <- struct{}{}
}
func (m *mockCommand) Kill() {
if !m.started {
panic("Sending kill signal before starting")
}
m.signalChan <- syscall.SIGKILL
}
func (m *mockCommand) assertTerminated(t *testing.T) {
select {
case <-m.didTermChan:
// works as expected
case <-time.After(time.Second):
t.Errorf("A process wasn't terminated within assert timeout")
debug.PrintStack()
}
}
func (m *mockCommand) assertSignal(t *testing.T, signum syscall.Signal) {
if <-m.signalChan != signum {
t.Errorf("An incorrect signal was used to terminate a process")
debug.PrintStack()
}
}
func (m *mockCommand) IsSubprocessRunning() bool {
return m.started && !m.terminated
}
func getMockCommand(i *IBazel) *mockCommand {
c, ok := i.cmd.(*mockCommand)
if !ok {
panic(fmt.Sprintf("Unable to cast i.cmd to a mockCommand. Was: %v", i.cmd))
}
return c
}
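// Illustrative handshake with the mock (a sketch mirroring the helpers
// above, not part of the tested API): Terminate blocks on doTermChan, so a
// test must drain signalChan and feed doTermChan before asserting.
//
//	cmd := &mockCommand{
//		signalChan:  make(chan syscall.Signal, 1),
//		doTermChan:  make(chan struct{}, 1),
//		didTermChan: make(chan struct{}, 1),
//	}
//	cmd.Start()
//	go cmd.Terminate()
//	<-cmd.signalChan             // SIGTERM observed
//	cmd.doTermChan <- struct{}{} // let Terminate finish
//	<-cmd.didTermChan            // termination acknowledged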
func init() {
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
// Don't do anything
return &mockCommand{
startupArgs: startupArgs,
bazelArgs: bazelArgs,
target: target,
args: args,
}
}
}
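// Sketch of the stub-and-restore pattern for tests that need a live mock
// rather than the inert default installed above (hypothetical body):
//
//	old := commandDefaultCommand
//	defer func() { commandDefaultCommand = old }()
//	commandDefaultCommand = func(startupArgs, bazelArgs []string, target string, args []string) command.Command {
//		return &mockCommand{startupArgs: startupArgs, bazelArgs: bazelArgs, target: target, args: args}
//	}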
func newIBazel(t *testing.T) (*IBazel, *mock_bazel.MockBazel) {
mockBazel := &mock_bazel.MockBazel{}
bazelNew = func() bazel.Bazel {
return mockBazel
}
i, err := New("testing")
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.workspaceFinder = &workspace.FakeWorkspace{}
return i, mockBazel
}
func TestIBazelLifecycle(t *testing.T) {
log.SetTesting(t)
i, _ := newIBazel(t)
i.Cleanup()
// Now inspect private API. If things weren't closed properly this will block
// and the test will timeout.
<-i.sourceFileWatcher.Events()
<-i.buildFileWatcher.Events()
}
func TestIBazelLoop(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
mockBazel.AddQueryResponse("buildfiles(deps(set(//my:target)))", &blaze_query.QueryResult{})
mockBazel.AddQueryResponse("kind('source file', deps(set(//my:target)))", &blaze_query.QueryResult{})
// Replace the file watching channel with one that has a buffer.
i.buildFileWatcher = &fakeFSNotifyWatcher{
EventChan: make(chan common.Event, 1),
}
defer i.Cleanup()
// The process for testing this is going to be to emit events to the channels
// that are associated with these objects and walk the state transition
// graph.
// First let's consume all the events from all the channels we care about
called := false
command := func(targets ...string) (*bytes.Buffer, error) {
called = true
return nil, nil
}
i.state = QUERY
step := func() {
i.iteration("demo", command, []string{}, "//my:target")
}
assertRun := func() {
t.Helper()
if called == false {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Should have run the provided command", file, line)
}
called = false
}
assertState := func(state State) {
t.Helper()
if i.state != state {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Expected state to be %s but was %s", file, line, state, i.state)
}
}
// Pretend a fairly normal event chain happens.
// Start, run the program, write a source file, run, write a build file, run.
assertState(QUERY)
step()
i.filesWatched[i.buildFileWatcher] = map[string]struct{}{"/path/to/BUILD": {}}
i.filesWatched[i.sourceFileWatcher] = map[string]struct{}{"/path/to/foo": {}}
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Source file change.
go func() { i.sourceFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/foo"} }()
step()
assertState(DEBOUNCE_RUN)
step()
// Don't send another event in to test the timer
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Build file change.
i.buildFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/BUILD"}
step()
assertState(DEBOUNCE_QUERY)
// Don't send another event in to test the timer
step()
assertState(QUERY)
step()
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
}
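// State graph exercised above (sketch):
//
//	QUERY -> RUN -> WAIT
//	WAIT -(source file event)-> DEBOUNCE_RUN -> RUN -> WAIT
//	WAIT -(build file event)-> DEBOUNCE_QUERY -> QUERY -> RUN -> WAIT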
func TestIBazelBuild(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddQueryResponse("//path/to:target", &blaze_query.QueryResult{
Target: []*blaze_query.Target{
{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
},
})
i.build("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Build", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelTest(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddCQueryResponse("//path/to:target", &analysispb.CqueryResult{
Results: []*analysispb.ConfiguredTarget{{
Target: &blaze_query.Target{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
}},
})
i.test("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"SetStartupArgs"},
{"SetArguments"},
{"WriteToStderr", "false"},
{"WriteToStdout", "false"},
{"CQuery", "//path/to:target"},
{"SetArguments", "--test_output=streamed"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Test", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelRun_notifyPreexistingJobWhenStarting(t *testing.T) {
log.SetTesting(t)
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
assertEqual(t, startupArgs, []string{}, "Startup args")
assertEqual(t, bazelArgs, []string{}, "Bazel args")
assertEqual(t, target, "", "Target")
assertEqual(t, args, []string{}, "Args")
return &mockCommand{}
}
defer func() { commandDefaultCommand = oldCommandDefaultCommand }()
i, _ := newIBazel(t)
defer i.Cleanup()
i.args = []string{"--do_it"}
cmd := &mockCommand{
notifiedOfChanges: false,
}
i.cmd = cmd
path := "//path/to:target"
i.run(path)
if !cmd.notifiedOfChanges {
t.Errorf("The previously running command was not notified of changes")
}
}
func TestHandleSignals_SIGINTWithoutRunningCommand(t *testing.T) {
log.SetTesting(t)
log.FakeExit()
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
assertEqual(t, i.cmd, nil, "There shouldn't be a subprocess running")
// SIGINT without a running command should attempt to exit
i.sigs <- syscall.SIGINT
i.handleSignals()
// Goroutine tests are kind of racy
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTNormalTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
// Second ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTForcefulTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGKILL)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
// Yet another ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTHitLimitTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGKILL)
assertNotOsExited(t, osExitChan)
// Third ctrl-c terminates ibazel even if the subprocess is not closed
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGTERM(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
i.sigs <- syscall.SIGTERM
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertOsExited(t, osExitChan)
}
func TestParseTarget(t *testing.T) {
log.SetTesting(t)
tests := []struct {
in string
repo string
target string
}{
{"@//my:target", "", "my:target"},
{"@repo//my:target", "repo", "my:target"},
{"@bazel_tools//:strange/target", "bazel_tools", ":strange/target"},
}
for _, test := range tests {
t.Run(test.in, func(t *testing.T) {
gotRepo, gotTarget := parseTarget(test.in)
if gotRepo != test.repo {
t.Errorf("parseTarget(%q).repo = %q, want %q", test.in, gotRepo, test.repo)
}
if gotTarget != test.target {
t.Errorf("parseTarget(%q).target = %q, want %q", test.in, gotTarget, test.target)
}
})
}
}
| {
panic("Terminated before starting")
} | conditional_block |
ibazel_test.go | // Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibazel
import (
"bytes"
"fmt"
"os"
"reflect"
"runtime"
"runtime/debug"
"syscall"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/bazelbuild/bazel-watcher/internal/bazel"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/command"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/fswatcher/common"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/log"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/workspace"
mock_bazel "github.com/bazelbuild/bazel-watcher/internal/bazel/testing"
analysispb "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/analysis"
blaze_query "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/blaze_query"
)
type fakeFSNotifyWatcher struct {
ErrorChan chan error
EventChan chan common.Event
}
var _ common.Watcher = &fakeFSNotifyWatcher{}
func (w *fakeFSNotifyWatcher) Close() error { return nil }
func (w *fakeFSNotifyWatcher) UpdateAll(names []string) error { return nil }
func (w *fakeFSNotifyWatcher) Events() chan common.Event { return w.EventChan }
var oldCommandDefaultCommand = command.DefaultCommand
func assertEqual(t *testing.T, want, got interface{}, msg string) {
if !reflect.DeepEqual(want, got) {
t.Errorf("Wanted %s, got %s. %s", want, got, msg)
debug.PrintStack()
}
}
func assertOsExited(t *testing.T, osExitChan chan int) {
select {
case exitCode := <-osExitChan:
assertEqual(t, 3, exitCode, "Should have exited ibazel")
case <-time.After(time.Second):
t.Errorf("It should have os.Exit'd")
debug.PrintStack()
}
}
func assertNotOsExited(t *testing.T, osExitChan chan int) {
select {
case <-osExitChan:
t.Errorf("It shouldn't have os.Exit'd")
debug.PrintStack()
case <-time.After(time.Second):
// works as expected
}
}
type mockCommand struct {
startupArgs []string
bazelArgs []string
target string
args []string
notifiedOfChanges bool
started bool
terminated bool
signalChan chan syscall.Signal
doTermChan chan struct{}
didTermChan chan struct{}
}
func (m *mockCommand) Start() (*bytes.Buffer, error) {
if m.started {
panic("Can't run command twice")
}
m.started = true
return nil, nil
}
func (m *mockCommand) NotifyOfChanges() *bytes.Buffer {
m.notifiedOfChanges = true
return nil
}
func (m *mockCommand) Terminate() {
if !m.started {
panic("Terminated before starting")
}
m.signalChan <- syscall.SIGTERM
<-m.doTermChan
m.terminated = true
m.didTermChan <- struct{}{}
}
func (m *mockCommand) Kill() {
if !m.started {
panic("Sending kill signal before starting")
}
m.signalChan <- syscall.SIGKILL
}
func (m *mockCommand) assertTerminated(t *testing.T) {
select {
case <-m.didTermChan:
// works as expected
case <-time.After(time.Second):
t.Errorf("A process wasn't terminated within assert timeout")
debug.PrintStack()
}
}
func (m *mockCommand) assertSignal(t *testing.T, signum syscall.Signal) {
if <-m.signalChan != signum {
t.Errorf("An incorrect signal was used to terminate a process")
debug.PrintStack()
}
}
func (m *mockCommand) IsSubprocessRunning() bool {
return m.started && !m.terminated
}
func getMockCommand(i *IBazel) *mockCommand {
c, ok := i.cmd.(*mockCommand)
if !ok {
panic(fmt.Sprintf("Unable to cast i.cmd to a mockCommand. Was: %v", i.cmd))
}
return c
}
func init() {
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
// Don't do anything
return &mockCommand{
startupArgs: startupArgs,
bazelArgs: bazelArgs,
target: target,
args: args,
}
}
}
func newIBazel(t *testing.T) (*IBazel, *mock_bazel.MockBazel) {
mockBazel := &mock_bazel.MockBazel{}
bazelNew = func() bazel.Bazel {
return mockBazel
}
i, err := New("testing")
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.workspaceFinder = &workspace.FakeWorkspace{}
return i, mockBazel
}
func TestIBazelLifecycle(t *testing.T) {
log.SetTesting(t)
i, _ := newIBazel(t)
i.Cleanup()
// Now inspect private API. If things weren't closed properly this will block
// and the test will time out.
<-i.sourceFileWatcher.Events()
<-i.buildFileWatcher.Events()
}
func TestIBazelLoop(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
mockBazel.AddQueryResponse("buildfiles(deps(set(//my:target)))", &blaze_query.QueryResult{})
mockBazel.AddQueryResponse("kind('source file', deps(set(//my:target)))", &blaze_query.QueryResult{})
// Replace the file watching channel with one that has a buffer.
i.buildFileWatcher = &fakeFSNotifyWatcher{
EventChan: make(chan common.Event, 1),
}
defer i.Cleanup()
// To test this, we emit events on the channels associated with these objects
// and walk the state transition graph.
// First, let's consume all the events from the channels we care about.
called := false
command := func(targets ...string) (*bytes.Buffer, error) {
called = true
return nil, nil
}
i.state = QUERY
step := func() {
i.iteration("demo", command, []string{}, "//my:target")
}
assertRun := func() {
t.Helper()
if called == false {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Should have run the provided command", file, line)
}
called = false
}
assertState := func(state State) {
t.Helper()
if i.state != state {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Expected state to be %s but was %s", file, line, state, i.state)
}
}
// Pretend a fairly normal event chain happens.
// Start, run the program, write a source file, run, write a build file, run.
assertState(QUERY)
step()
i.filesWatched[i.buildFileWatcher] = map[string]struct{}{"/path/to/BUILD": {}}
i.filesWatched[i.sourceFileWatcher] = map[string]struct{}{"/path/to/foo": {}}
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Source file change.
go func() { i.sourceFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/foo"} }()
step()
assertState(DEBOUNCE_RUN)
step()
// Don't send another event in to test the timer
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Build file change.
i.buildFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/BUILD"}
step()
assertState(DEBOUNCE_QUERY)
// Don't send another event in to test the timer
step()
assertState(QUERY)
step()
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
}
func TestIBazelBuild(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddQueryResponse("//path/to:target", &blaze_query.QueryResult{
Target: []*blaze_query.Target{
{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
},
})
i.build("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Build", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelTest(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddCQueryResponse("//path/to:target", &analysispb.CqueryResult{
Results: []*analysispb.ConfiguredTarget{{
Target: &blaze_query.Target{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
}},
})
i.test("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"SetStartupArgs"},
{"SetArguments"},
{"WriteToStderr", "false"},
{"WriteToStdout", "false"},
{"CQuery", "//path/to:target"},
{"SetArguments", "--test_output=streamed"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Test", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelRun_notifyPreexistiingJobWhenStarting(t *testing.T) {
log.SetTesting(t)
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
assertEqual(t, startupArgs, []string{}, "Startup args")
assertEqual(t, bazelArgs, []string{}, "Bazel args")
assertEqual(t, target, "", "Target")
assertEqual(t, args, []string{}, "Args")
return &mockCommand{}
}
defer func() { commandDefaultCommand = oldCommandDefaultCommand }()
i, _ := newIBazel(t)
defer i.Cleanup()
i.args = []string{"--do_it"}
cmd := &mockCommand{
notifiedOfChanges: false,
}
i.cmd = cmd
path := "//path/to:target"
i.run(path)
if !cmd.notifiedOfChanges {
t.Errorf("The previously running command was not notified of changes")
}
}
func TestHandleSignals_SIGINTWithoutRunningCommand(t *testing.T) {
log.SetTesting(t)
log.FakeExit()
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
assertEqual(t, i.cmd, nil, "There shouldn't be a subprocess running")
// SIGINT without a running command should attempt to exit
i.sigs <- syscall.SIGINT
i.handleSignals()
// Goroutine tests are kind of racy
assertOsExited(t, osExitChan)
}
func | (t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
// Second ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
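// The remaining SIGINT tests walk the escalation ladder: a first ctrl-c asks
// the subprocess to terminate (SIGTERM), a second falls back to SIGKILL while
// the subprocess is still running, and a further ctrl-c exits ibazel whether
// or not the subprocess has shut down.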
func TestHandleSignals_SIGINTForcefulTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGKILL)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
// Yet another ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTHitLimitTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGKILL)
assertNotOsExited(t, osExitChan)
// Third ctrl-c terminates ibazel even if the subprocess is not closed
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGTERM(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
i.sigs <- syscall.SIGTERM
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertOsExited(t, osExitChan)
}
func TestParseTarget(t *testing.T) {
log.SetTesting(t)
tests := []struct {
in string
repo string
target string
}{
{"@//my:target", "", "my:target"},
{"@repo//my:target", "repo", "my:target"},
{"@bazel_tools//:strange/target", "bazel_tools", ":strange/target"},
}
for _, test := range tests {
t.Run(test.in, func(t *testing.T) {
gotRepo, gotTarget := parseTarget(test.in)
if gotRepo != test.repo {
t.Errorf("parseTarget(%q).repo = %q, want %q", test.in, gotRepo, test.repo)
}
if gotTarget != test.target {
t.Errorf("parseTarget(%q).target = %q, want %q", test.in, gotTarget, test.target)
}
})
}
}
| TestHandleSignals_SIGINTNormalTermination | identifier_name |
ibazel_test.go | // Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibazel
import (
"bytes"
"fmt"
"os"
"reflect"
"runtime"
"runtime/debug"
"syscall"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/bazelbuild/bazel-watcher/internal/bazel"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/command"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/fswatcher/common"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/log"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/workspace"
mock_bazel "github.com/bazelbuild/bazel-watcher/internal/bazel/testing"
analysispb "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/analysis"
blaze_query "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/blaze_query"
)
type fakeFSNotifyWatcher struct {
ErrorChan chan error
EventChan chan common.Event
}
var _ common.Watcher = &fakeFSNotifyWatcher{}
func (w *fakeFSNotifyWatcher) Close() error { return nil }
func (w *fakeFSNotifyWatcher) UpdateAll(names []string) error { return nil }
func (w *fakeFSNotifyWatcher) Events() chan common.Event { return w.EventChan }
var oldCommandDefaultCommand = command.DefaultCommand
func assertEqual(t *testing.T, want, got interface{}, msg string) |
func assertOsExited(t *testing.T, osExitChan chan int) {
select {
case exitCode := <-osExitChan:
assertEqual(t, 3, exitCode, "Should have exited ibazel")
case <-time.After(time.Second):
t.Errorf("It should have os.Exit'd")
debug.PrintStack()
}
}
func assertNotOsExited(t *testing.T, osExitChan chan int) {
select {
case <-osExitChan:
t.Errorf("It shouldn't have os.Exit'd")
debug.PrintStack()
case <-time.After(time.Second):
// works as expected
}
}
type mockCommand struct {
startupArgs []string
bazelArgs []string
target string
args []string
notifiedOfChanges bool
started bool
terminated bool
signalChan chan syscall.Signal
doTermChan chan struct{}
didTermChan chan struct{}
}
func (m *mockCommand) Start() (*bytes.Buffer, error) {
if m.started {
panic("Can't run command twice")
}
m.started = true
return nil, nil
}
func (m *mockCommand) NotifyOfChanges() *bytes.Buffer {
m.notifiedOfChanges = true
return nil
}
func (m *mockCommand) Terminate() {
if !m.started {
panic("Terminated before starting")
}
m.signalChan <- syscall.SIGTERM
<-m.doTermChan
m.terminated = true
m.didTermChan <- struct{}{}
}
func (m *mockCommand) Kill() {
if !m.started {
panic("Sending kill signal before starting")
}
m.signalChan <- syscall.SIGKILL
}
func (m *mockCommand) assertTerminated(t *testing.T) {
select {
case <-m.didTermChan:
// works as expected
case <-time.After(time.Second):
t.Errorf("A process wasn't terminated within assert timeout")
debug.PrintStack()
}
}
func (m *mockCommand) assertSignal(t *testing.T, signum syscall.Signal) {
if <-m.signalChan != signum {
t.Errorf("An incorrect signal was used to terminate a process")
debug.PrintStack()
}
}
func (m *mockCommand) IsSubprocessRunning() bool {
return m.started && !m.terminated
}
func getMockCommand(i *IBazel) *mockCommand {
c, ok := i.cmd.(*mockCommand)
if !ok {
panic(fmt.Sprintf("Unable to cast i.cmd to a mockCommand. Was: %v", i.cmd))
}
return c
}
func init() {
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
// Don't do anything
return &mockCommand{
startupArgs: startupArgs,
bazelArgs: bazelArgs,
target: target,
args: args,
}
}
}
func newIBazel(t *testing.T) (*IBazel, *mock_bazel.MockBazel) {
mockBazel := &mock_bazel.MockBazel{}
bazelNew = func() bazel.Bazel {
return mockBazel
}
i, err := New("testing")
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.workspaceFinder = &workspace.FakeWorkspace{}
return i, mockBazel
}
func TestIBazelLifecycle(t *testing.T) {
log.SetTesting(t)
i, _ := newIBazel(t)
i.Cleanup()
// Now inspect private API. If things weren't closed properly this will block
// and the test will time out.
<-i.sourceFileWatcher.Events()
<-i.buildFileWatcher.Events()
}
func TestIBazelLoop(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
mockBazel.AddQueryResponse("buildfiles(deps(set(//my:target)))", &blaze_query.QueryResult{})
mockBazel.AddQueryResponse("kind('source file', deps(set(//my:target)))", &blaze_query.QueryResult{})
// Replace the file watching channel with one that has a buffer.
i.buildFileWatcher = &fakeFSNotifyWatcher{
EventChan: make(chan common.Event, 1),
}
defer i.Cleanup()
// To test this, we emit events on the channels associated with these objects
// and walk the state transition graph.
// First, let's consume all the events from the channels we care about.
called := false
command := func(targets ...string) (*bytes.Buffer, error) {
called = true
return nil, nil
}
i.state = QUERY
step := func() {
i.iteration("demo", command, []string{}, "//my:target")
}
assertRun := func() {
t.Helper()
if called == false {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Should have run the provided command", file, line)
}
called = false
}
assertState := func(state State) {
t.Helper()
if i.state != state {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Expected state to be %s but was %s", file, line, state, i.state)
}
}
// Pretend a fairly normal event chain happens.
// Start, run the program, write a source file, run, write a build file, run.
assertState(QUERY)
step()
i.filesWatched[i.buildFileWatcher] = map[string]struct{}{"/path/to/BUILD": {}}
i.filesWatched[i.sourceFileWatcher] = map[string]struct{}{"/path/to/foo": {}}
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Source file change.
go func() { i.sourceFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/foo"} }()
step()
assertState(DEBOUNCE_RUN)
step()
// Don't send another event in to test the timer
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Build file change.
i.buildFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/BUILD"}
step()
assertState(DEBOUNCE_QUERY)
// Don't send another event in to test the timer
step()
assertState(QUERY)
step()
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
}
func TestIBazelBuild(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddQueryResponse("//path/to:target", &blaze_query.QueryResult{
Target: []*blaze_query.Target{
{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
},
})
i.build("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Build", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelTest(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddCQueryResponse("//path/to:target", &analysispb.CqueryResult{
Results: []*analysispb.ConfiguredTarget{{
Target: &blaze_query.Target{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
}},
})
i.test("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"SetStartupArgs"},
{"SetArguments"},
{"WriteToStderr", "false"},
{"WriteToStdout", "false"},
{"CQuery", "//path/to:target"},
{"SetArguments", "--test_output=streamed"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Test", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelRun_notifyPreexistiingJobWhenStarting(t *testing.T) {
log.SetTesting(t)
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
assertEqual(t, startupArgs, []string{}, "Startup args")
assertEqual(t, bazelArgs, []string{}, "Bazel args")
assertEqual(t, target, "", "Target")
assertEqual(t, args, []string{}, "Args")
return &mockCommand{}
}
defer func() { commandDefaultCommand = oldCommandDefaultCommand }()
i, _ := newIBazel(t)
defer i.Cleanup()
i.args = []string{"--do_it"}
cmd := &mockCommand{
notifiedOfChanges: false,
}
i.cmd = cmd
path := "//path/to:target"
i.run(path)
if !cmd.notifiedOfChanges {
t.Errorf("The previously running command was not notified of changes")
}
}
func TestHandleSignals_SIGINTWithoutRunningCommand(t *testing.T) {
log.SetTesting(t)
log.FakeExit()
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
assertEqual(t, i.cmd, nil, "There shouldn't be a subprocess running")
// SIGINT without a running command should attempt to exit
i.sigs <- syscall.SIGINT
i.handleSignals()
// Goroutine tests are kind of racy
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTNormalTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
// Second ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTForcefulTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGKILL)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
// Yet another ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTHitLimitTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGKILL)
assertNotOsExited(t, osExitChan)
// Third ctrl-c terminates ibazel even if the subprocess is not closed
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGTERM(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
i.sigs <- syscall.SIGTERM
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertOsExited(t, osExitChan)
}
func TestParseTarget(t *testing.T) {
log.SetTesting(t)
tests := []struct {
in string
repo string
target string
}{
{"@//my:target", "", "my:target"},
{"@repo//my:target", "repo", "my:target"},
{"@bazel_tools//:strange/target", "bazel_tools", ":strange/target"},
}
for _, test := range tests {
t.Run(test.in, func(t *testing.T) {
gotRepo, gotTarget := parseTarget(test.in)
if gotRepo != test.repo {
t.Errorf("parseTarget(%q).repo = %q, want %q", test.in, gotRepo, test.repo)
}
if gotTarget != test.target {
t.Errorf("parseTarget(%q).target = %q, want %q", test.in, gotTarget, test.target)
}
})
}
}
| {
if !reflect.DeepEqual(want, got) {
t.Errorf("Wanted %v, got %v. %s", want, got, msg)
debug.PrintStack()
}
} | identifier_body |
ibazel_test.go | // Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibazel
import (
"bytes"
"fmt"
"os"
"reflect"
"runtime"
"runtime/debug"
"syscall"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/bazelbuild/bazel-watcher/internal/bazel"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/command"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/fswatcher/common"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/log"
"github.com/bazelbuild/bazel-watcher/internal/ibazel/workspace"
mock_bazel "github.com/bazelbuild/bazel-watcher/internal/bazel/testing"
analysispb "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/analysis"
blaze_query "github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/blaze_query"
)
type fakeFSNotifyWatcher struct {
ErrorChan chan error
EventChan chan common.Event
}
var _ common.Watcher = &fakeFSNotifyWatcher{}
func (w *fakeFSNotifyWatcher) Close() error { return nil }
func (w *fakeFSNotifyWatcher) UpdateAll(names []string) error { return nil }
func (w *fakeFSNotifyWatcher) Events() chan common.Event { return w.EventChan }
var oldCommandDefaultCommand = command.DefaultCommand
func assertEqual(t *testing.T, want, got interface{}, msg string) {
if !reflect.DeepEqual(want, got) {
t.Errorf("Wanted %v, got %v. %s", want, got, msg)
debug.PrintStack()
}
}
func assertOsExited(t *testing.T, osExitChan chan int) {
select {
case exitCode := <-osExitChan:
assertEqual(t, 3, exitCode, "Should have exited ibazel")
case <-time.After(time.Second):
t.Errorf("It should have os.Exit'd")
debug.PrintStack()
}
}
func assertNotOsExited(t *testing.T, osExitChan chan int) {
select {
case <-osExitChan:
t.Errorf("It shouldn't have os.Exit'd")
debug.PrintStack()
case <-time.After(time.Second):
// works as expected
}
}
type mockCommand struct {
startupArgs []string
bazelArgs []string
target string
args []string
notifiedOfChanges bool
started bool
terminated bool
signalChan chan syscall.Signal
doTermChan chan struct{}
didTermChan chan struct{}
}
func (m *mockCommand) Start() (*bytes.Buffer, error) {
if m.started {
panic("Can't run command twice")
}
m.started = true
return nil, nil
}
func (m *mockCommand) NotifyOfChanges() *bytes.Buffer {
m.notifiedOfChanges = true
return nil
}
func (m *mockCommand) Terminate() {
if !m.started {
panic("Terminated before starting")
}
m.signalChan <- syscall.SIGTERM
<-m.doTermChan
m.terminated = true
m.didTermChan <- struct{}{}
}
func (m *mockCommand) Kill() {
if !m.started {
panic("Sending kill signal before starting")
}
m.signalChan <- syscall.SIGKILL
}
func (m *mockCommand) assertTerminated(t *testing.T) {
select {
case <-m.didTermChan:
// works as expected
case <-time.After(time.Second):
t.Errorf("A process wasn't terminated within assert timeout")
debug.PrintStack()
}
}
func (m *mockCommand) assertSignal(t *testing.T, signum syscall.Signal) {
if <-m.signalChan != signum {
t.Errorf("An incorrect signal was used to terminate a process")
debug.PrintStack()
}
}
func (m *mockCommand) IsSubprocessRunning() bool {
return m.started && !m.terminated
}
func getMockCommand(i *IBazel) *mockCommand {
c, ok := i.cmd.(*mockCommand)
if !ok {
panic(fmt.Sprintf("Unable to cast i.cmd to a mockCommand. Was: %v", i.cmd))
}
return c
}
func init() {
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
// Don't do anything
return &mockCommand{
startupArgs: startupArgs,
bazelArgs: bazelArgs,
target: target,
args: args,
}
}
}
func newIBazel(t *testing.T) (*IBazel, *mock_bazel.MockBazel) {
mockBazel := &mock_bazel.MockBazel{}
bazelNew = func() bazel.Bazel {
return mockBazel
}
i, err := New("testing")
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.workspaceFinder = &workspace.FakeWorkspace{}
return i, mockBazel
}
func TestIBazelLifecycle(t *testing.T) {
log.SetTesting(t)
i, _ := newIBazel(t)
i.Cleanup()
// Now inspect private API. If things weren't closed properly this will block
// and the test will time out.
<-i.sourceFileWatcher.Events()
<-i.buildFileWatcher.Events()
}
func TestIBazelLoop(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
mockBazel.AddQueryResponse("buildfiles(deps(set(//my:target)))", &blaze_query.QueryResult{})
mockBazel.AddQueryResponse("kind('source file', deps(set(//my:target)))", &blaze_query.QueryResult{})
// Replace the file watching channel with one that has a buffer.
i.buildFileWatcher = &fakeFSNotifyWatcher{
EventChan: make(chan common.Event, 1),
}
defer i.Cleanup()
// To test this, we emit events on the channels associated with these objects
// and walk the state transition graph.
// First, let's consume all the events from the channels we care about.
called := false
command := func(targets ...string) (*bytes.Buffer, error) {
called = true
return nil, nil
}
i.state = QUERY
step := func() {
i.iteration("demo", command, []string{}, "//my:target")
}
assertRun := func() {
t.Helper()
if called == false {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Should have run the provided command", file, line)
}
called = false
}
assertState := func(state State) {
t.Helper()
if i.state != state {
_, file, line, _ := runtime.Caller(1) // decorate + log + public function.
t.Errorf("%s:%v Expected state to be %s but was %s", file, line, state, i.state)
}
}
// Pretend a fairly normal event chain happens.
// Start, run the program, write a source file, run, write a build file, run.
assertState(QUERY)
step()
i.filesWatched[i.buildFileWatcher] = map[string]struct{}{"/path/to/BUILD": {}}
i.filesWatched[i.sourceFileWatcher] = map[string]struct{}{"/path/to/foo": {}}
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Source file change.
go func() { i.sourceFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/foo"} }()
step()
assertState(DEBOUNCE_RUN)
step()
// Don't send another event in to test the timer
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
// Build file change.
i.buildFileWatcher.Events() <- common.Event{Op: common.Write, Name: "/path/to/BUILD"}
step()
assertState(DEBOUNCE_QUERY)
// Don't send another event in to test the timer
step()
assertState(QUERY)
step()
assertState(RUN)
step() // Actually run the command
assertRun()
assertState(WAIT)
}
func TestIBazelBuild(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddQueryResponse("//path/to:target", &blaze_query.QueryResult{
Target: []*blaze_query.Target{
{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
},
})
i.build("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Build", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelTest(t *testing.T) {
log.SetTesting(t)
i, mockBazel := newIBazel(t)
defer i.Cleanup()
mockBazel.AddCQueryResponse("//path/to:target", &analysispb.CqueryResult{
Results: []*analysispb.ConfiguredTarget{{
Target: &blaze_query.Target{
Type: blaze_query.Target_RULE.Enum(),
Rule: &blaze_query.Rule{
Name: proto.String("//path/to:target"),
Attribute: []*blaze_query.Attribute{
{Name: proto.String("name")},
},
},
},
}},
})
i.test("//path/to:target")
expected := [][]string{
{"SetStartupArgs"},
{"SetArguments"},
{"Info"},
{"SetStartupArgs"},
{"SetArguments"},
{"SetStartupArgs"},
{"SetArguments"},
{"WriteToStderr", "false"},
{"WriteToStdout", "false"},
{"CQuery", "//path/to:target"},
{"SetArguments", "--test_output=streamed"},
{"Cancel"},
{"WriteToStderr", "true"},
{"WriteToStdout", "true"},
{"Test", "//path/to:target"},
}
mockBazel.AssertActions(t, expected)
}
func TestIBazelRun_notifyPreexistiingJobWhenStarting(t *testing.T) {
log.SetTesting(t)
commandDefaultCommand = func(startupArgs []string, bazelArgs []string, target string, args []string) command.Command {
assertEqual(t, startupArgs, []string{}, "Startup args")
assertEqual(t, bazelArgs, []string{}, "Bazel args")
assertEqual(t, target, "", "Target")
assertEqual(t, args, []string{}, "Args")
return &mockCommand{}
}
defer func() { commandDefaultCommand = oldCommandDefaultCommand }()
i, _ := newIBazel(t)
defer i.Cleanup()
i.args = []string{"--do_it"}
cmd := &mockCommand{
notifiedOfChanges: false,
}
i.cmd = cmd
path := "//path/to:target"
i.run(path)
if !cmd.notifiedOfChanges {
t.Errorf("The previously running command was not notified of changes")
}
}
func TestHandleSignals_SIGINTWithoutRunningCommand(t *testing.T) {
log.SetTesting(t)
log.FakeExit()
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
assertEqual(t, i.cmd, nil, "There shouldn't be a subprocess running")
// SIGINT without a running command should attempt to exit
i.sigs <- syscall.SIGINT
i.handleSignals()
// Goroutine tests are kind of racy
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTNormalTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
// Second ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTForcefulTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals() | // Yet another ctrl-c terminates ibazel
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGINTHitLimitTermination(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
// First ctrl-c sends custom signal (SIGTERM)
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
assertNotOsExited(t, osExitChan)
// Second ctrl-c sends SIGKILL
i.sigs <- syscall.SIGINT
i.handleSignals()
cmd.assertSignal(t, syscall.SIGKILL)
assertNotOsExited(t, osExitChan)
// Third ctrl-c terminates ibazel even if the subprocess is not closed
i.sigs <- syscall.SIGINT
i.handleSignals()
assertOsExited(t, osExitChan)
}
func TestHandleSignals_SIGTERM(t *testing.T) {
log.SetTesting(t)
i := &IBazel{}
err := i.setup()
if err != nil {
t.Errorf("Error creating IBazel: %s", err)
}
i.sigs = make(chan os.Signal, 1)
defer i.Cleanup()
osExitChan := make(chan int, 1)
osExit = func(i int) {
osExitChan <- i
}
cmd := &mockCommand{
signalChan: make(chan syscall.Signal, 10),
doTermChan: make(chan struct{}, 1),
didTermChan: make(chan struct{}, 1),
}
i.cmd = cmd
cmd.Start()
i.sigs <- syscall.SIGTERM
i.handleSignals()
cmd.assertSignal(t, syscall.SIGTERM)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertOsExited(t, osExitChan)
}
func TestParseTarget(t *testing.T) {
log.SetTesting(t)
tests := []struct {
in string
repo string
target string
}{
{"@//my:target", "", "my:target"},
{"@repo//my:target", "repo", "my:target"},
{"@bazel_tools//:strange/target", "bazel_tools", ":strange/target"},
}
for _, test := range tests {
t.Run(test.in, func(t *testing.T) {
gotRepo, gotTarget := parseTarget(test.in)
if gotRepo != test.repo {
t.Errorf("parseTarget(%q).repo = %q, want %q", test.in, gotRepo, test.repo)
}
if gotTarget != test.target {
t.Errorf("parseTarget(%q).target = %q, want %q", test.in, gotTarget, test.target)
}
})
}
} | cmd.assertSignal(t, syscall.SIGKILL)
cmd.doTermChan <- struct{}{}
cmd.assertTerminated(t)
assertNotOsExited(t, osExitChan)
| random_line_split |
ym.rs | use core::time::Duration;
use core::num::NonZeroU32;
use core::fmt;
use core::ops::Range;
use chrono::NaiveDateTime;
pub mod flags;
pub mod effects;
mod parse;
mod player;
use flags::*;
use effects::*;
pub const MAX_DD_SAMPLES: usize = 32;
pub const MFP_TIMER_FREQUENCY: u32 = 2_457_600;
const DEFAULT_CHIPSET_FREQUENCY: u32 = 2_000_000;
const DEFAULT_FRAME_FREQUENCY: u16 = 50;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum YmVersion {
Ym2,
Ym3,
Ym4,
Ym5,
Ym6,
}
impl YmVersion {
/// The YM version identifier tag as a string (4 ascii characters).
pub fn tag(self) -> &'static str {
match self {
YmVersion::Ym2 => "YM2!",
YmVersion::Ym3 => "YM3!",
YmVersion::Ym4 => "YM4!",
YmVersion::Ym5 => "YM5!",
YmVersion::Ym6 => "YM6!",
}
}
}
impl fmt::Display for YmVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.tag().fmt(f)
}
}
/// The **YM** music file.
///
/// The YM-file consists of [YmFrame]s that represent the state of the AY/YM chipset registers and
/// contain additional information about special effects.
///
/// Depending on the [YmSong::version] special effects are being encoded differently.
#[derive(Debug, Clone)]
pub struct YmSong {
/// YM-file version.
pub version: YmVersion,
/// The last modification timestamp of the YM-file from the LHA envelope.
pub created: Option<NaiveDateTime>,
/// The song attributes.
pub song_attrs: SongAttributes,
/// The song title or a file name.
pub title: String,
/// The song author.
pub author: String,
/// The comment.
pub comments: String,
/// The number of cycles per second of the AY/YM chipset clock.
pub chipset_frequency: u32,
/// The number of frames played each second.
pub frame_frequency: u16,
/// The loop frame index.
pub loop_frame: u32,
/// The AY/YM state frames.
pub frames: Box<[YmFrame]>,
/// `DIGI-DRUM` samples.
pub dd_samples: Box<[u8]>,
/// `DIGI-DRUM` sample end indexes in [YmSong::dd_samples].
pub dd_samples_ends: [usize;MAX_DD_SAMPLES],
cursor: usize,
voice_effects: [(SidVoice, SinusSid, DigiDrum); 3],
buzzer: SyncBuzzer,
}
/// This type represents the state of the AY/YM chipset registers and contains
/// additional information about special effects.
///
/// ```text
/// X - AY/YM register data.
/// S - Controls special effects.
/// P - Frequency pre-divisor.
/// F - Frequency divisor.
/// - - Unused.
/// ----------------------------------------------------------
/// b7 b6 b5 b4 b3 b2 b1 b0 Register description
/// 0: X X X X X X X X Fine period voice A
/// 1: S S S S X X X X Coarse period voice A
/// 2: X X X X X X X X Fine period voice B
/// 3: S S S S X X X X Coarse period voice B
/// 4: X X X X X X X X Fine period voice C
/// 5: - - - - X X X X Coarse period voice C
/// 6: P P P X X X X X Noise period
/// 7: X X X X X X X X Mixer control
/// 8: P P P X X X X X Volume voice A
/// 9: - - - X X X X X Volume voice B
/// 10: - - - X X X X X Volume voice C
/// 11: X X X X X X X X Envelope fine period
/// 12: X X X X X X X X Envelope coarse period
/// 13: x x x x X X X X Envelope shape
/// ----------------------------------------------------------
/// virtual registers to store extra data for special effects:
/// ----------------------------------------------------------
/// 14: F F F F F F F F Frequency divisor for S in 1
/// 15: F F F F F F F F Frequency divisor for S in 3
/// ```
///
/// The AY/YM `Envelope shape` register is modified only if the value of frame
/// register 13 is not equal to `0xff`.
///
/// # Special effects
///
/// The frequency of a special effect is encoded as `(2457600 / P) / F`.
///
/// The divisor `F` is an unsigned 8-bit integer.
///
/// The pre-divisor `P` is encoded as:
///
/// |PPP| pre-divisor value|
/// |-----------------------|
/// |000| Timer off |
/// |001| 4 |
/// |010| 10 |
/// |011| 16 |
/// |100| 50 |
/// |101| 64 |
/// |110| 100 |
/// |111| 200 |
///
/// * The pre-divisor `P` in register 6 matches the effect controlled by register 1.
/// * The divisor `F` in register 14 matches the effect controlled by register 1.
/// * The pre-divisor `P` in register 8 matches the effect controlled by register 3.
/// * The divisor `F` in register 15 matches the effect controlled by register 3.
///
/// If an effect is active, the additional data resides in `X` bits in the `Volume` register of
/// the relevant voice:
///
/// * For the [`SID voice`][SidVoice] and [`Sinus SID`][SinusSid] effects the 4 lowest `X` bits
/// determine the effect's volume.
/// * For the [`Sync Buzzer`][SyncBuzzer] the 4 lowest `X` bits determine the effect's `Envelope shape`.
/// * For the [`DIGI-DRUM`][DigiDrum] effect the 5 `X` bits determine the played sample number.
/// * The `DIGI-DRUM` sample plays until its end or until it is overridden by another effect.
/// * All other effects are active only for the duration of a single frame.
/// * When the `DIGI-DRUM` is active the volume register from the frame for the relevant voice is being
/// ignored and the relevant voice mixer tone and noise bits are forced to be set.
///
/// The control bits of special effects are interpreted differently depending on the YM-file version.
///
/// ## YM6!
///
/// The `S` bits in registers 1 and 3 controls any two of the selectable effects:
/// ```text
/// b7 b6 b5 b4
/// - - 0 0 effect disabled
/// - - 0 1 effect active on voice A
/// - - 1 0 effect active on voice B
/// - - 1 1 effect active on voice C
/// 0 0 - - select SID voice effect
/// 0 1 - - select DIGI-DRUM effect
/// 1 0 - - select Sinus SID effect
/// 1 1 - - select Sync Buzzer effect
/// ```
///
/// ## YM4!/YM5!
///
/// The `S` bits in register 1 controls the `SID voice` effect.
/// The `S` bits in register 3 controls the `DIGI-DRUM` effect.
/// ```text
/// b7 b6 b5 b4
/// - - 0 0 effect disabled
/// - - 0 1 effect active on voice A
/// - - 1 0 effect active on voice B
/// - - 1 1 effect active on voice C
/// - 0 - - SID voice timer continues, ignored for DIGI-DRUM
/// - 1 - - SID voice timer restarts, ignored for DIGI-DRUM
///```
///
/// ## YM3!
///
/// There are no special effects in this version.
///
/// ## YM2!
///
/// Only the `DIGI-DRUM` effect is recognized in this format. It is played on
/// voice C and uses one of the 40 predefined samples.
///
/// * The effect starts when the highest bit (7) of the `Volume voice C` register (10) is 1.
/// * The sample number is taken from the lowest 7 bits of the `Volume voice C` register (10).
/// * The effect frequency is calculated by `(2457600 / 4) / X`, where `X` is the unsigned 8-bit
/// value stored in the register 12 of the frame.
/// * The values of AY/YM chipset registers 11, 12 and 13 are only written if the
/// value of frame register 13 is not equal to `0xFF`.
/// * The register 12 of the AY/YM chipset is always being set to `0` in this format.
/// * The register 13 of the AY/YM chipset is always being set to `0x10` in this format.
#[derive(Default, Debug, Clone, Copy)]
pub struct YmFrame {
/// Frame data.
pub data: [u8;16]
}
impl YmSong {
/// Creates a new instance of `YmSong` from the given `frames` and other meta data.
pub fn new(
version: YmVersion,
frames: Box<[YmFrame]>,
loop_frame: u32,
title: String,
created: Option<NaiveDateTime>
) -> YmSong
{
YmSong {
version,
created,
song_attrs: SongAttributes::default(),
title,
author: String::new(),
comments: String::new(),
chipset_frequency: DEFAULT_CHIPSET_FREQUENCY,
frame_frequency: DEFAULT_FRAME_FREQUENCY,
loop_frame,
frames,
dd_samples: Box::new([]),
dd_samples_ends: [0usize;MAX_DD_SAMPLES],
cursor: 0,
voice_effects: Default::default(),
buzzer: Default::default()
}
}
/// Returns `YmSong` with the `author` and `comments` set from the given arguments.
pub fn with_meta(mut self, author: String, comments: String) -> YmSong {
self.author = author;
self.comments = comments;
self
}
/// Returns `YmSong` with the `song_attrs`, `dd_samples` and `dd_samples_ends` set from the given arguments.
pub fn with_samples(
mut self,
song_attrs: SongAttributes,
dd_samples: Box<[u8]>,
dd_samples_ends: [usize;MAX_DD_SAMPLES]
) -> YmSong
{
self.song_attrs = song_attrs;
self.dd_samples = dd_samples;
self.dd_samples_ends = dd_samples_ends;
self
}
/// Returns `YmSong` with the `chipset_frequency` and `frame_frequency` set from the given arguments.
pub fn with_frequency(mut self, chipset_frequency: u32, frame_frequency: u16) -> YmSong |
/// Returns the song duration.
pub fn song_duration(&self) -> Duration {
let seconds = self.frames.len() as f64 / self.frame_frequency as f64;
Duration::from_secs_f64(seconds)
}
/// Returns the AY/YM chipset clock frequency.
#[inline]
pub fn clock_frequency(&self) -> f32 {
self.chipset_frequency as f32
}
/// Returns the number of AY/YM chipset clock cycles of a single music frame.
pub fn frame_cycles(&self) -> f32 {
self.clock_frequency() / self.frame_frequency as f32
}
/// Calculates the timer interval in clock cycles, from the given `divisor`.
pub fn timer_interval(&self, divisor: NonZeroU32) -> f32 {
let divisor = divisor.get() as f32;
self.clock_frequency() as f32 * divisor / MFP_TIMER_FREQUENCY as f32
}
/// Returns the indicated sample data range in the [YmSong::dd_samples] for the given `sample`.
///
/// # Panics
/// Panics if `sample` value is not below [MAX_DD_SAMPLES].
pub fn sample_data_range(&self, sample: usize) -> Range<usize> {
let end = self.dd_samples_ends[sample];
let start = match sample {
0 => 0,
index => self.dd_samples_ends[index - 1]
};
start..end
}
}
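// Illustrative sketch only (not part of the original source): decoding the
// YM6! effect-select bits documented on [YmFrame]. The enum and function are
// hypothetical; the crate itself models these bits as [FxCtrlFlags] in the
// `flags` module.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Ym6Effect { SidVoice, DigiDrum, SinusSid, SyncBuzzer }

#[allow(dead_code)]
fn decode_ym6_effect(s: u8) -> Option<(Ym6Effect, u8)> {
    // b5..b4 select the voice (00 = effect disabled).
    let voice = (s >> 4) & 0b11;
    if voice == 0 {
        return None;
    }
    // b7..b6 select which of the four effects is active.
    let fx = match (s >> 6) & 0b11 {
        0b00 => Ym6Effect::SidVoice,
        0b01 => Ym6Effect::DigiDrum,
        0b10 => Ym6Effect::SinusSid,
        _ => Ym6Effect::SyncBuzzer,
    };
    Some((fx, voice - 1)) // 0 = voice A, 1 = voice B, 2 = voice C
}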
impl YmFrame {
/// Returns special effect control flags from the register 1.
pub fn fx0(&self) -> FxCtrlFlags {
FxCtrlFlags::from_bits_retain(self.data[1])
}
/// Returns special effect control flags from the register 3.
pub fn fx1(&self) -> FxCtrlFlags {
FxCtrlFlags::from_bits_retain(self.data[3])
}
/// Returns the value of the volume register for the indicated `chan`.
///
/// The 2 lowest bits of `chan` indicate the voice channel:
/// ```text
/// b1 b0 voice channel
/// 0 0 A
/// 0 1 B
/// 1 0 C
/// 1 1 invalid (panics in debug mode)
/// ```
pub fn vol(&self, chan: u8) -> u8 {
let chan = chan & 3;
debug_assert_ne!(chan, 3);
self.data[(VOL_A_REG + chan) as usize] & 0x1f
}
/// Calculates the timer divisor for the special effect `fx0`.
pub fn timer_divisor0(&self) -> Option<NonZeroU32> {
calculate_timer_divisor(self.data[6], self.data[14])
}
/// Calculates the timer divisor for the special effect `fx1`.
pub fn timer_divisor1(&self) -> Option<NonZeroU32> {
calculate_timer_divisor(self.data[8], self.data[15])
}
}
fn calculate_timer_divisor(prediv3: u8, div8: u8) -> Option<NonZeroU32> {
let prediv = match prediv3 & 0b11100000 {
0b00000000 => 0,
0b00100000 => 4,
0b01000000 => 10,
0b01100000 => 16,
0b10000000 => 50,
0b10100000 => 64,
0b11000000 => 100,
0b11100000 => 200,
_ => unreachable!()
};
NonZeroU32::new(prediv * div8 as u32)
}
| {
self.chipset_frequency = chipset_frequency;
self.frame_frequency = frame_frequency;
self
} | identifier_body |
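As a sanity check on the effect-frequency encoding documented above, here is a small, self-contained sketch. Only `MFP_TIMER_FREQUENCY` and the pre-divisor table are taken from the listing; the `effect_frequency` helper and the chosen inputs are illustrative, not part of the crate.

```rust
use core::num::NonZeroU32;

const MFP_TIMER_FREQUENCY: u32 = 2_457_600;

// Same PPP table as calculate_timer_divisor in the listing above.
fn effect_frequency(prediv3: u8, div8: u8) -> Option<f32> {
    let prediv = match prediv3 & 0b1110_0000 {
        0b0000_0000 => 0,
        0b0010_0000 => 4,
        0b0100_0000 => 10,
        0b0110_0000 => 16,
        0b1000_0000 => 50,
        0b1010_0000 => 64,
        0b1100_0000 => 100,
        0b1110_0000 => 200,
        _ => unreachable!(),
    };
    NonZeroU32::new(prediv * div8 as u32)
        .map(|d| MFP_TIMER_FREQUENCY as f32 / d.get() as f32)
}

fn main() {
    // PPP = 001 (pre-divisor 4) and F = 100: (2457600 / 4) / 100 = 6144 Hz.
    assert_eq!(effect_frequency(0b0010_0000, 100), Some(6144.0));
    // PPP = 000 means the timer is off: no effect frequency at all.
    assert_eq!(effect_frequency(0b0000_0000, 42), None);
    // YmSong::timer_interval converts the same divisor into chip cycles: at
    // the default 2 MHz clock, 2_000_000 * 400 / 2_457_600 ≈ 325.5 cycles
    // per timer tick.
}
```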
ym.rs | use core::time::Duration;
use core::num::NonZeroU32;
use core::fmt;
use core::ops::Range;
use chrono::NaiveDateTime;
pub mod flags;
pub mod effects;
mod parse;
mod player;
use flags::*;
use effects::*;
pub const MAX_DD_SAMPLES: usize = 32;
pub const MFP_TIMER_FREQUENCY: u32 = 2_457_600;
const DEFAULT_CHIPSET_FREQUENCY: u32 = 2_000_000;
const DEFAULT_FRAME_FREQUENCY: u16 = 50;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum YmVersion {
Ym2,
Ym3,
Ym4,
Ym5,
Ym6,
}
impl YmVersion {
/// The YM version identifier tag as a string (4 ascii characters).
pub fn tag(self) -> &'static str {
match self {
YmVersion::Ym2 => "YM2!",
YmVersion::Ym3 => "YM3!",
YmVersion::Ym4 => "YM4!",
YmVersion::Ym5 => "YM5!",
YmVersion::Ym6 => "YM6!",
}
}
}
impl fmt::Display for YmVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.tag().fmt(f)
}
}
/// The **YM** music file.
///
/// The YM-file consists of [YmFrame]s that represent the state of the AY/YM chipset registers and
/// contain additional information about special effects.
///
/// Depending on the [YmSong::version] special effects are being encoded differently.
#[derive(Debug, Clone)]
pub struct YmSong {
/// YM-file version.
pub version: YmVersion,
/// The last modification timestamp of the YM-file from the LHA envelope.
pub created: Option<NaiveDateTime>,
/// The song attributes.
pub song_attrs: SongAttributes,
/// The song title or a file name.
pub title: String,
/// The song author.
pub author: String,
/// The comment.
pub comments: String,
/// The number of cycles per second of the AY/YM chipset clock.
pub chipset_frequency: u32,
/// The number of frames played each second.
pub frame_frequency: u16,
/// The loop frame index.
pub loop_frame: u32,
/// The AY/YM state frames.
pub frames: Box<[YmFrame]>,
/// `DIGI-DRUM` samples.
pub dd_samples: Box<[u8]>,
/// `DIGI-DRUM` sample end indexes in [YmSong::dd_samples].
pub dd_samples_ends: [usize;MAX_DD_SAMPLES],
cursor: usize,
voice_effects: [(SidVoice, SinusSid, DigiDrum); 3],
buzzer: SyncBuzzer,
}
/// This type represents the state of the AY/YM chipset registers and contains
/// additional information about special effects.
///
/// ```text
/// X - AY/YM register data.
/// S - Controls special effects.
/// P - Frequency pre-divisor.
/// F - Frequency divisor.
/// - - Unused.
/// ----------------------------------------------------------
/// b7 b6 b5 b4 b3 b2 b1 b0 Register description
/// 0: X X X X X X X X Fine period voice A
/// 1: S S S S X X X X Coarse period voice A
/// 2: X X X X X X X X Fine period voice B
/// 3: S S S S X X X X Coarse period voice B
/// 4: X X X X X X X X Fine period voice C
/// 5: - - - - X X X X Coarse period voice C
/// 6: P P P X X X X X Noise period
/// 7: X X X X X X X X Mixer control
/// 8: P P P X X X X X Volume voice A
/// 9: - - - X X X X X Volume voice B
/// 10: - - - X X X X X Volume voice C
/// 11: X X X X X X X X Envelope fine period
/// 12: X X X X X X X X Envelope coarse period
/// 13: x x x x X X X X Envelope shape
/// ----------------------------------------------------------
/// virtual registers to store extra data for special effects:
/// ----------------------------------------------------------
/// 14: F F F F F F F F Frequency divisor for S in 1
/// 15: F F F F F F F F Frequency divisor for S in 3
/// ```
///
/// The AY/YM `Envelope shape` register is modified only if the value of frame
/// register 13 is not equal to `0xff`.
///
/// # Special effects
///
/// The frequency of a special effect is encoded as `(2457600 / P) / F`.
///
/// The divisor `F` is an unsigned 8-bit integer.
///
/// The pre-divisor `P` is encoded as:
///
/// |PPP| pre-divisor value|
/// |-----------------------|
/// |000| Timer off |
/// |001| 4 |
/// |010| 10 |
/// |011| 16 |
/// |100| 50 |
/// |101| 64 |
/// |110| 100 |
/// |111| 200 |
///
/// * The pre-divisor `P` in register 6 matches the effect controlled by register 1.
/// * The divisor `F` in register 14 matches the effect controlled by register 1.
/// * The pre-divisor `P` in register 8 matches the effect controlled by register 3.
/// * The divisor `F` in register 15 matches the effect controlled by register 3.
///
/// If an effect is active, the additional data resides in `X` bits in the `Volume` register of
/// the relevant voice:
///
/// * For the [`SID voice`][SidVoice] and [`Sinus SID`][SinusSid] effects the 4 lowest `X` bits
/// determine the effect's volume.
/// * For the [`Sync Buzzer`][SyncBuzzer] the 4 lowest `X` bits determine the effect's `Envelope shape`.
/// * For the [`DIGI-DRUM`][DigiDrum] effect the 5 `X` bits determine the played sample number.
/// * The `DIGI-DRUM` sample plays until its end or until it is overridden by another effect.
/// * All other effects are active only for the duration of a single frame.
/// * When the `DIGI-DRUM` is active the volume register from the frame for the relevant voice is being
/// ignored and the relevant voice mixer tone and noise bits are forced to be set.
///
/// The control bits of special effects are interpreted differently depending on the YM-file version.
///
/// ## YM6!
///
/// The `S` bits in registers 1 and 3 controls any two of the selectable effects:
/// ```text
/// b7 b6 b5 b4
/// - - 0 0 effect disabled
/// - - 0 1 effect active on voice A
/// - - 1 0 effect active on voice B
/// - - 1 1 effect active on voice C
/// 0 0 - - select SID voice effect
/// 0 1 - - select DIGI-DRUM effect
/// 1 0 - - select Sinus SID effect
/// 1 1 - - select Sync Buzzer effect
/// ```
///
/// ## YM4!/YM5!
///
/// The `S` bits in register 1 controls the `SID voice` effect.
/// The `S` bits in register 3 controls the `DIGI-DRUM` effect.
/// ```text
/// b7 b6 b5 b4
/// - - 0 0 effect disabled
/// - - 0 1 effect active on voice A
/// - - 1 0 effect active on voice B
/// - - 1 1 effect active on voice C
/// - 0 - - SID voice timer continues, ignored for DIGI-DRUM
/// - 1 - - SID voice timer restarts, ignored for DIGI-DRUM
///```
///
/// ## YM3!
///
/// There are no special effects in this version.
///
/// ## YM2!
///
/// Only the `DIGI-DRUM` effect is recognized in this format. It is played on
/// voice C and uses one of the 40 predefined samples.
///
/// * The effect starts when the highest bit (7) of the `Volume voice C` register (10) is 1.
/// * The sample number is taken from the lowest 7 bits of the `Volume voice C` register (10).
/// * The effect frequency is calculated by `(2457600 / 4) / X`, where `X` is the unsigned 8-bit
/// value stored in the register 12 of the frame.
/// * The values of AY/YM chipset registers 11, 12 and 13 are only written if the
/// value of frame register 13 is not equal to `0xFF`.
/// * The register 12 of the AY/YM chipset is always being set to `0` in this format.
/// * The register 13 of the AY/YM chipset is always being set to `0x10` in this format.
#[derive(Default, Debug, Clone, Copy)]
pub struct YmFrame {
/// Frame data.
pub data: [u8;16]
}
impl YmSong {
/// Creates a new instance of `YmSong` from the given `frames` and other meta data.
pub fn new(
version: YmVersion,
frames: Box<[YmFrame]>,
loop_frame: u32,
title: String,
created: Option<NaiveDateTime>
) -> YmSong
{
YmSong {
version,
created,
song_attrs: SongAttributes::default(),
title,
author: String::new(),
comments: String::new(),
chipset_frequency: DEFAULT_CHIPSET_FREQUENCY,
frame_frequency: DEFAULT_FRAME_FREQUENCY,
loop_frame,
frames,
dd_samples: Box::new([]),
dd_samples_ends: [0usize;MAX_DD_SAMPLES],
cursor: 0,
voice_effects: Default::default(),
buzzer: Default::default()
}
}
/// Returns `YmSong` with the `author` and `comments` set from the given arguments.
pub fn with_meta(mut self, author: String, comments: String) -> YmSong {
self.author = author;
self.comments = comments;
self
}
/// Returns `YmSong` with the `song_attrs`, `dd_samples` and `dd_samples_ends` set from the given arguments.
pub fn with_samples(
mut self,
song_attrs: SongAttributes,
dd_samples: Box<[u8]>,
dd_samples_ends: [usize;MAX_DD_SAMPLES]
) -> YmSong
{
self.song_attrs = song_attrs;
self.dd_samples = dd_samples;
self.dd_samples_ends = dd_samples_ends;
self
}
/// Returns `YmSong` with the `chipset_frequency` and `frame_frequency` set from the given arguments.
pub fn with_frequency(mut self, chipset_frequency: u32, frame_frequency: u16) -> YmSong {
self.chipset_frequency = chipset_frequency;
self.frame_frequency = frame_frequency;
self
}
/// Returns the song duration.
pub fn | (&self) -> Duration {
let seconds = self.frames.len() as f64 / self.frame_frequency as f64;
Duration::from_secs_f64(seconds)
}
/// Returns the AY/YM chipset clock frequency.
#[inline]
pub fn clock_frequency(&self) -> f32 {
self.chipset_frequency as f32
}
/// Returns the number of AY/YM chipset clock cycles of a single music frame.
pub fn frame_cycles(&self) -> f32 {
self.clock_frequency() / self.frame_frequency as f32
}
/// Calculates the timer interval in clock cycles, from the given `divisor`.
pub fn timer_interval(&self, divisor: NonZeroU32) -> f32 {
let divisor = divisor.get() as f32;
self.clock_frequency() as f32 * divisor / MFP_TIMER_FREQUENCY as f32
}
/// Returns the indicated sample data range in the [YmSong::dd_samples] for the given `sample`.
///
/// # Panics
/// Panics if `sample` value is not below [MAX_DD_SAMPLES].
pub fn sample_data_range(&self, sample: usize) -> Range<usize> {
let end = self.dd_samples_ends[sample];
let start = match sample {
0 => 0,
index => self.dd_samples_ends[index - 1]
};
start..end
}
}
impl YmFrame {
/// Returns special effect control flags from the register 1.
pub fn fx0(&self) -> FxCtrlFlags {
FxCtrlFlags::from_bits_retain(self.data[1])
}
/// Returns special effect control flags from the register 3.
pub fn fx1(&self) -> FxCtrlFlags {
FxCtrlFlags::from_bits_retain(self.data[3])
}
/// Returns the value of the volume register for the indicated `chan`.
///
/// The 2 lowest bits of `chan` indicate the voice channel:
/// ```text
/// b1 b0 voice channel
/// 0 0 A
/// 0 1 B
/// 1 0 C
/// 1 1 invalid (panics in debug mode)
/// ```
pub fn vol(&self, chan: u8) -> u8 {
let chan = chan & 3;
debug_assert_ne!(chan, 3);
self.data[(VOL_A_REG + chan) as usize] & 0x1f
}
/// Calculates the timer divisor for the special effect `fx0`.
pub fn timer_divisor0(&self) -> Option<NonZeroU32> {
calculate_timer_divisor(self.data[6], self.data[14])
}
/// Calculates the timer divisor for the special effect `fx1`.
pub fn timer_divisor1(&self) -> Option<NonZeroU32> {
calculate_timer_divisor(self.data[8], self.data[15])
}
}
fn calculate_timer_divisor(prediv3: u8, div8: u8) -> Option<NonZeroU32> {
let prediv = match prediv3 & 0b11100000 {
0b00000000 => 0,
0b00100000 => 4,
0b01000000 => 10,
0b01100000 => 16,
0b10000000 => 50,
0b10100000 => 64,
0b11000000 => 100,
0b11100000 => 200,
_ => unreachable!()
};
NonZeroU32::new(prediv * div8 as u32)
}
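// A minimal sanity check of the mapping above (illustrative inputs only):
// `0b01000000` selects the pre-divisor 10 per the table in the docs, so with
// `div8 = 100` the divisor is 1000; a pre-divisor of 0 means the timer is off.
#[cfg(test)]
mod calculate_timer_divisor_tests {
use super::*;
#[test]
fn maps_prediv_bits() {
assert_eq!(calculate_timer_divisor(0b01000000, 100).map(|d| d.get()), Some(1000));
assert_eq!(calculate_timer_divisor(0b00000000, 100), None);
}
}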
| song_duration | identifier_name |
ym.rs | use core::time::Duration;
use core::num::NonZeroU32;
use core::fmt;
use core::ops::Range;
use chrono::NaiveDateTime;
pub mod flags;
pub mod effects;
mod parse;
mod player;
use flags::*;
use effects::*;
pub const MAX_DD_SAMPLES: usize = 32;
pub const MFP_TIMER_FREQUENCY: u32 = 2_457_600;
const DEFAULT_CHIPSET_FREQUENCY: u32 = 2_000_000;
const DEFAULT_FRAME_FREQUENCY: u16 = 50;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum YmVersion {
Ym2,
Ym3,
Ym4,
Ym5,
Ym6,
}
impl YmVersion {
/// The YM version identifier tag as a string (4 ASCII characters).
pub fn tag(self) -> &'static str {
match self {
YmVersion::Ym2 => "YM2!",
YmVersion::Ym3 => "YM3!",
YmVersion::Ym4 => "YM4!",
YmVersion::Ym5 => "YM5!",
YmVersion::Ym6 => "YM6!",
}
}
}
impl fmt::Display for YmVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.tag().fmt(f)
}
}
/// The **YM** music file.
///
/// The YM-file consists of [YmFrame]s that represent the state of the AY/YM chipset registers and
/// contain additional information about special effects.
///
/// Depending on the [YmSong::version], special effects are encoded differently.
#[derive(Debug, Clone)]
pub struct YmSong {
/// YM-file version.
pub version: YmVersion,
/// The last modification timestamp of the YM-file from the LHA envelope.
pub created: Option<NaiveDateTime>,
/// The song attributes.
pub song_attrs: SongAttributes,
/// The song title or a file name.
pub title: String,
/// The song author.
pub author: String,
/// The comment.
pub comments: String,
/// The number of cycles per second of the AY/YM chipset clock.
pub chipset_frequency: u32,
/// The number of frames played each second.
pub frame_frequency: u16,
/// The loop frame index.
pub loop_frame: u32,
/// The AY/YM state frames.
pub frames: Box<[YmFrame]>,
/// `DIGI-DRUM` samples.
pub dd_samples: Box<[u8]>,
/// `DIGI-DRUM` sample end indexes in [YmSong::dd_samples].
pub dd_samples_ends: [usize;MAX_DD_SAMPLES],
cursor: usize,
voice_effects: [(SidVoice, SinusSid, DigiDrum); 3],
buzzer: SyncBuzzer,
}
/// This type represents the state of the AY/YM chipset registers and contains additional
/// information about special effects.
///
/// ```text
/// X - AY/YM register data.
/// S - Controls special effects.
/// P - Frequency pre-divisor.
/// F - Frequency divisor.
/// - - Unused.
/// ----------------------------------------------------------
/// b7 b6 b5 b4 b3 b2 b1 b0 Register description
/// 0: X X X X X X X X Fine period voice A
/// 1: S S S S X X X X Coarse period voice A
/// 2: X X X X X X X X Fine period voice B
/// 3: S S S S X X X X Coarse period voice B
/// 4: X X X X X X X X Fine period voice C
/// 5: - - - - X X X X Coarse period voice C
/// 6: P P P X X X X X Noise period
/// 7: X X X X X X X X Mixer control
/// 8: P P P X X X X X Volume voice A
/// 9: - - - X X X X X Volume voice B
/// 10: - - - X X X X X Volume voice C
/// 11: X X X X X X X X Envelope fine period
/// 12: X X X X X X X X Envelope coarse period
/// 13: x x x x X X X X Envelope shape
/// ----------------------------------------------------------
/// virtual registers to store extra data for special effects:
/// ----------------------------------------------------------
/// 14: F F F F F F F F Frequency divisor for S in 1
/// 15: F F F F F F F F Frequency divisor for S in 3
/// ```
///
/// The AY/YM `Envelope shape` register is modified only if the value of frame
/// register 13 is not equal to `0xff`.
///
/// # Special effects
///
/// The frequency of a special effect is encoded as `(2457600 / P) / F`.
///
/// The divisor `F` is an unsigned 8-bit integer.
///
/// The pre-divisor `P` is encoded as:
///
/// |PPP| pre-divisor value|
/// |---|------------------|
/// |000| Timer off |
/// |001| 4 |
/// |010| 10 |
/// |011| 16 |
/// |100| 50 |
/// |101| 64 |
/// |110| 100 |
/// |111| 200 |
///
/// * The pre-divisor `P` in register 6 matches the effect controlled by register 1.
/// * The divisor `F` in register 14 matches the effect controlled by register 1.
/// * The pre-divisor `P` in register 8 matches the effect controlled by register 3.
/// * The divisor `F` in register 15 matches the effect controlled by register 3.
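///
/// A hypothetical worked example: `P = 4` with `F = 240` yields
/// `(2457600 / 4) / 240 = 2560` Hz (values chosen only for illustration):
///
/// ```
/// assert_eq!((2_457_600 / 4) / 240, 2_560);
/// ```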
///
/// If an effect is active, the additional data resides in `X` bits in the `Volume` register of
/// the relevant voice:
///
/// * For the [`SID voice`][SidVoice] and [`Sinus SID`][SinusSid] effects the 4 lowest `X` bits
/// determine the effect's volume.
/// * For the [`Sync Buzzer`][SyncBuzzer] the 4 lowest `X` bits determine the effect's `Envelope shape`.
/// * For the [`DIGI-DRUM`][DigiDrum] effect the 5 `X` bits determine the played sample number.
/// * The `DIGI-DRUM` sample plays until its end or until it is overridden by another effect.
/// * All other effects are active only for the duration of a single frame.
/// * When the `DIGI-DRUM` is active, the volume register from the frame for the relevant voice is
/// ignored and the relevant voice's mixer tone and noise bits are forced to be set.
///
/// The control bits of special effects are interpreted differently depending on the YM-file version.
///
/// ## YM6!
///
/// The `S` bits in registers 1 and 3 control any two of the selectable effects:
/// ```text
/// b7 b6 b5 b4
/// - - 0 0 effect disabled
/// - - 0 1 effect active on voice A
/// - - 1 0 effect active on voice B
/// - - 1 1 effect active on voice C
/// 0 0 - - select SID voice effect
/// 0 1 - - select DIGI-DRUM effect
/// 1 0 - - select Sinus SID effect
/// 1 1 - - select Sync Buzzer effect
/// ```
///
/// ## YM4!/YM5!
///
/// The `S` bits in register 1 control the `SID voice` effect.
/// The `S` bits in register 3 control the `DIGI-DRUM` effect.
/// ```text
/// b7 b6 b5 b4
/// - - 0 0 effect disabled
/// - - 0 1 effect active on voice A
/// - - 1 0 effect active on voice B
/// - - 1 1 effect active on voice C
/// - 0 - - SID voice timer continues, ignored for DIGI-DRUM
/// - 1 - - SID voice timer restarts, ignored for DIGI-DRUM
/// ```
///
/// ## YM3!
///
/// There are no special effects in this version.
///
/// ## YM2!
///
/// Only the `DIGI-DRUM` effect is recognized in this format. It is played on voice C and | /// uses one of the 40 predefined samples.
///
/// * The effect starts when the highest bit (7) of the `Volume voice C` register (10) is 1.
/// * The sample number is taken from the lowest 7 bits of the `Volume voice C` register (10).
/// * The effect frequency is calculated as `(2457600 / 4) / X`, where `X` is the unsigned 8-bit
/// value stored in frame register 12.
/// * The values of AY/YM chipset registers 11, 12 and 13 are only written if the value of
/// frame register 13 is not equal to `0xFF`.
/// * Register 12 of the AY/YM chipset is always set to `0` in this format.
/// * Register 13 of the AY/YM chipset is always set to `0x10` in this format.
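///
/// A hypothetical worked example of the formula above: with `X = 32` stored in
/// frame register 12, the drum sample would play at `(2457600 / 4) / 32 = 19200` Hz:
///
/// ```
/// assert_eq!((2_457_600 / 4) / 32, 19_200);
/// ```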
#[derive(Default, Debug, Clone, Copy)]
pub struct YmFrame {
/// Frame data.
pub data: [u8;16]
}
impl YmSong {
/// Creates a new instance of `YmSong` from the given `frames` and other metadata.
pub fn new(
version: YmVersion,
frames: Box<[YmFrame]>,
loop_frame: u32,
title: String,
created: Option<NaiveDateTime>
) -> YmSong
{
YmSong {
version,
created,
song_attrs: SongAttributes::default(),
title,
author: String::new(),
comments: String::new(),
chipset_frequency: DEFAULT_CHIPSET_FREQUENCY,
frame_frequency: DEFAULT_FRAME_FREQUENCY,
loop_frame,
frames,
dd_samples: Box::new([]),
dd_samples_ends: [0usize;MAX_DD_SAMPLES],
cursor: 0,
voice_effects: Default::default(),
buzzer: Default::default()
}
}
/// Returns `YmSong` with the `author` and `comments` set from the given arguments.
pub fn with_meta(mut self, author: String, comments: String) -> YmSong {
self.author = author;
self.comments = comments;
self
}
/// Returns `YmSong` with the `song_attrs`, `dd_samples` and `dd_samples_ends` set from the given arguments.
pub fn with_samples(
mut self,
song_attrs: SongAttributes,
dd_samples: Box<[u8]>,
dd_samples_ends: [usize;MAX_DD_SAMPLES]
) -> YmSong
{
self.song_attrs = song_attrs;
self.dd_samples = dd_samples;
self.dd_samples_ends = dd_samples_ends;
self
}
/// Returns `YmSong` with the `chipset_frequency` and `frame_frequency` set from the given arguments.
pub fn with_frequency(mut self, chipset_frequency: u32, frame_frequency: u16) -> YmSong {
self.chipset_frequency = chipset_frequency;
self.frame_frequency = frame_frequency;
self
}
/// Returns the song duration.
pub fn song_duration(&self) -> Duration {
let seconds = self.frames.len() as f64 / self.frame_frequency as f64;
Duration::from_secs_f64(seconds)
}
/// Returns the AY/YM chipset clock frequency.
#[inline]
pub fn clock_frequency(&self) -> f32 {
self.chipset_frequency as f32
}
/// Returns the number of AY/YM chipset clock cycles of a single music frame.
pub fn frame_cycles(&self) -> f32 {
self.clock_frequency() / self.frame_frequency as f32
}
/// Calculates the timer interval in clock cycles, from the given `divisor`.
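/// For example (illustrative values only): a 2 MHz chipset clock and a
/// divisor of 1000 give `2_000_000 * 1000 / 2_457_600 ≈ 813.8` clock cycles
/// per timer tick.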
pub fn timer_interval(&self, divisor: NonZeroU32) -> f32 {
let divisor = divisor.get() as f32;
self.clock_frequency() * divisor / MFP_TIMER_FREQUENCY as f32
}
/// Returns the indicated sample data range in [YmSong::dd_samples] for the given `sample`.
///
/// # Panics
/// Panics if `sample` value is not below [MAX_DD_SAMPLES].
pub fn sample_data_range(&self, sample: usize) -> Range<usize> {
let end = self.dd_samples_ends[sample];
let start = match sample {
0 => 0,
index => self.dd_samples_ends[index - 1]
};
start..end
}
}
impl YmFrame {
/// Returns special effect control flags from the register 1.
pub fn fx0(&self) -> FxCtrlFlags {
FxCtrlFlags::from_bits_retain(self.data[1])
}
/// Returns special effect control flags from the register 3.
pub fn fx1(&self) -> FxCtrlFlags {
FxCtrlFlags::from_bits_retain(self.data[3])
}
/// Returns the value of the volume register for the indicated `chan`.
///
/// The 2 lowest bits of `chan` indicate the voice channel:
/// ```text
/// b1 b0 voice channel
/// 0 0 A
/// 0 1 B
/// 1 0 C
/// 1 1 invalid (panics in debug mode)
/// ```
pub fn vol(&self, chan: u8) -> u8 {
let chan = chan & 3;
debug_assert_ne!(chan, 3);
self.data[(VOL_A_REG + chan) as usize] & 0x1f
}
/// Calculates the timer divisor for the special effect `fx0`.
pub fn timer_divisor0(&self) -> Option<NonZeroU32> {
calculate_timer_divisor(self.data[6], self.data[14])
}
/// Calculates the timer divisor for the special effect `fx1`.
pub fn timer_divisor1(&self) -> Option<NonZeroU32> {
calculate_timer_divisor(self.data[8], self.data[15])
}
}
fn calculate_timer_divisor(prediv3: u8, div8: u8) -> Option<NonZeroU32> {
let prediv = match prediv3 & 0b11100000 {
0b00000000 => 0,
0b00100000 => 4,
0b01000000 => 10,
0b01100000 => 16,
0b10000000 => 50,
0b10100000 => 64,
0b11000000 => 100,
0b11100000 => 200,
_ => unreachable!()
};
NonZeroU32::new(prediv * div8 as u32)
} | random_line_split | |
coprocessor.rs | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::{result, error};
use std::thread::{self, JoinHandle};
use std::sync::mpsc::{self, Sender, Receiver};
use mio::Token;
use tipb::select::{self, SelectRequest, SelectResponse, Row};
use tipb::schema::IndexInfo;
use protobuf::{Message as PbMsg, RepeatedField};
use byteorder::{BigEndian, ReadBytesExt};
use storage::{Engine, SnapshotStore, engine, txn, mvcc};
use kvproto::kvrpcpb::{Context, LockInfo};
use kvproto::msgpb::{MessageType, Message};
use kvproto::coprocessor::{Request, Response, KeyRange};
use kvproto::errorpb;
use storage::Key;
use util::codec::{Datum, table, datum};
use util::xeval::Evaluator;
use server::{self, SendCh, Msg, ConnData};
pub const REQ_TYPE_SELECT: i64 = 101;
pub const REQ_TYPE_INDEX: i64 = 102;
const DEFAULT_ERROR_CODE: i32 = 1;
quick_error! {
#[derive(Debug)]
pub enum Error {
Region(err: errorpb::Error) {
description("region related failure")
display("region {:?}", err)
}
Locked(l: LockInfo) {
description("key is locked")
display("locked {:?}", l)
}
Other(err: Box<error::Error + Send + Sync>) {
from()
cause(err.as_ref())
description(err.description())
display("unknown error {:?}", err)
}
}
}
pub type Result<T> = result::Result<T, Error>;
impl From<engine::Error> for Error {
fn from(e: engine::Error) -> Error {
match e {
engine::Error::Request(e) => Error::Region(e),
_ => Error::Other(box e),
}
}
}
impl From<txn::Error> for Error {
fn from(e: txn::Error) -> Error {
match e {
txn::Error::Mvcc(mvcc::Error::KeyIsLocked { primary, ts, key }) => {
let mut info = LockInfo::new();
info.set_primary_lock(primary);
info.set_lock_version(ts);
info.set_key(key);
Error::Locked(info)
}
_ => Error::Other(box e),
}
}
}
pub struct RegionEndPoint {
tx: Sender<EndPointMessage>,
handle: Option<JoinHandle<()>>,
}
type ResponseHandler = Box<Fn(Response) -> ()>;
#[derive(Debug)]
enum EndPointMessage {
Job(Request, Token, u64),
Close,
}
fn msg_poller(engine: Arc<Box<Engine>>, rx: Receiver<EndPointMessage>, ch: SendCh) {
info!("EndPoint started.");
let end_point = SnapshotEndPoint::new(engine);
loop {
let msg = rx.recv();
if let Err(e) = msg {
error!("failed to receive job: {:?}", e);
break;
}
let msg = msg.unwrap();
debug!("recv req: {:?}", msg);
match msg {
EndPointMessage::Job(req, token, msg_id) => {
handle_request(req, ch.clone(), token, msg_id, &end_point)
}
EndPointMessage::Close => break,
}
}
info!("EndPoint closing.");
}
impl RegionEndPoint {
pub fn new(engine: Arc<Box<Engine>>, ch: SendCh) -> RegionEndPoint {
let (tx, rx) = mpsc::channel();
let builder = thread::Builder::new().name("EndPoint".to_owned());
let handle = builder.spawn(move || msg_poller(engine, rx, ch)).unwrap();
RegionEndPoint {
tx: tx,
handle: Some(handle),
}
}
pub fn on_request(&self, req: Request, token: Token, msg_id: u64) -> server::Result<()> {
box_try!(self.tx.send(EndPointMessage::Job(req, token, msg_id)));
Ok(())
}
pub fn stop(&mut self) {
if self.handle.is_none() {
return;
}
if let Err(e) = self.tx.send(EndPointMessage::Close) {
error!("failed to ask the coprocessor to stop: {:?}", e);
}
if let Err(e) = self.handle.take().unwrap().join() {
error!("failed to stop the coprocessor: {:?}", e);
}
}
}
fn handle_request(req: Request,
ch: SendCh,
token: Token,
msg_id: u64,
end_point: &SnapshotEndPoint) {
let cb = box move |r| {
let mut resp_msg = Message::new();
resp_msg.set_msg_type(MessageType::CopResp);
resp_msg.set_cop_resp(r);
if let Err(e) = ch.send(Msg::WriteData {
token: token,
data: ConnData::new(msg_id, resp_msg),
}) {
error!("send cop resp failed with token {:?}, msg id {}, err {:?}",
token,
msg_id,
e);
}
};
match req.get_tp() {
REQ_TYPE_SELECT | REQ_TYPE_INDEX => {
let mut sel = SelectRequest::new();
if let Err(e) = sel.merge_from_bytes(req.get_data()) {
on_error(box_err!(e), cb);
return;
}
match end_point.handle_select(req, sel) {
Ok(r) => cb(r),
Err(e) => on_error(e, cb),
}
}
t => on_error(box_err!("unsupported tp {}", t), cb),
}
}
fn on_error(e: Error, cb: ResponseHandler) {
let mut resp = Response::new();
match e {
Error::Region(e) => resp.set_region_error(e),
Error::Locked(info) => resp.set_locked(info),
Error::Other(_) => resp.set_other_error(format!("{}", e)),
}
cb(resp)
}
pub struct SnapshotEndPoint {
engine: Arc<Box<Engine>>,
}
impl SnapshotEndPoint {
pub fn new(engine: Arc<Box<Engine>>) -> SnapshotEndPoint {
// TODO: Spawn a new thread for handling requests asynchronously.
SnapshotEndPoint { engine: engine }
}
fn new_snapshot<'a>(&'a self, ctx: &Context, start_ts: u64) -> Result<SnapshotStore<'a>> {
let snapshot = try!(self.engine.snapshot(ctx));
Ok(SnapshotStore::new(snapshot, start_ts))
}
}
impl SnapshotEndPoint {
pub fn handle_select(&self, mut req: Request, sel: SelectRequest) -> Result<Response> {
let snap = try!(self.new_snapshot(req.get_context(), sel.get_start_ts()));
let range = req.take_ranges().into_vec();
debug!("scanning range: {:?}", range);
let res = if req.get_tp() == REQ_TYPE_SELECT {
get_rows_from_sel(&snap, &sel, range)
} else {
get_rows_from_idx(&snap, &sel, range)
};
let mut resp = Response::new();
let mut sel_resp = SelectResponse::new();
match res {
Ok(rows) => sel_resp.set_rows(RepeatedField::from_vec(rows)),
Err(e) => {
if let Error::Other(_) = e {
// should we handle locked here too?
sel_resp.set_error(to_pb_error(&e));
// TODO: add detailed error information
resp.set_other_error(format!("{}", e));
} else {
// Other errors should be handled by the TiDB client.
return Err(e);
}
}
}
let data = box_try!(sel_resp.write_to_bytes());
resp.set_data(data);
Ok(resp)
}
}
fn to_pb_error(err: &Error) -> select::Error {
let mut e = select::Error::new();
e.set_code(DEFAULT_ERROR_CODE);
e.set_msg(format!("{}", err));
e
}
fn get_rows_from_sel(snap: &SnapshotStore,
sel: &SelectRequest,
ranges: Vec<KeyRange>)
-> Result<Vec<Row>> {
let mut eval = Evaluator::default();
let mut rows = vec![];
for ran in ranges {
let ran_rows = try!(get_rows_from_range(snap, sel, ran, &mut eval));
rows.extend(ran_rows);
}
Ok(rows)
}
fn prefix_next(key: &[u8]) -> Vec<u8> {
let mut nk = key.to_vec();
if nk.is_empty() {
nk.push(0);
return nk;
}
let mut i = nk.len() - 1;
loop {
if nk[i] == 255 {
nk[i] = 0;
} else {
nk[i] += 1;
return nk;
}
if i == 0 {
nk = key.to_vec();
nk.push(0);
return nk;
}
i -= 1;
}
}
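// A quick illustration of the successor-key semantics above, using made-up
// byte strings: a trailing 0xff carries into the previous byte, and a key of
// all 0xff bytes is extended with a zero byte instead of overflowing.
#[cfg(test)]
mod prefix_next_test {
use super::*;
#[test]
fn test_prefix_next() {
assert_eq!(prefix_next(b"a"), b"b".to_vec());
assert_eq!(prefix_next(&[0x61, 0xff]), vec![0x62, 0x00]);
assert_eq!(prefix_next(&[0xff, 0xff]), vec![0xff, 0xff, 0x00]);
assert_eq!(prefix_next(b""), vec![0x00]);
}
}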
/// `is_point` checks if the key range represents a point.
fn is_point(range: &KeyRange) -> bool {
range.get_end() == &*prefix_next(range.get_start())
}
fn get_rows_from_range(snap: &SnapshotStore,
sel: &SelectRequest,
mut range: KeyRange,
eval: &mut Evaluator)
-> Result<Vec<Row>> {
let mut rows = vec![];
if is_point(&range) {
if let None = try!(snap.get(&Key::from_raw(range.get_start().to_vec()))) {
return Ok(rows);
}
let h = box_try!(table::decode_handle(range.get_start()));
if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) {
rows.push(row);
}
} else {
let mut seek_key = range.take_start();
loop {
trace!("seek {:?}", seek_key);
let mut res = try!(snap.scan(Key::from_raw(seek_key), 1));
if res.is_empty() {
debug!("no more data to scan.");
break;
}
let (key, _) = try!(res.pop().unwrap());
if range.get_end() <= &key {
debug!("reach end key: {:?} >= {:?}", key, range.get_end());
break;
}
let h = box_try!(table::decode_handle(&key));
if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) {
rows.push(row);
}
seek_key = prefix_next(&key);
}
}
Ok(rows)
}
fn get_row_by_handle(snap: &SnapshotStore,
sel: &SelectRequest,
h: i64,
eval: &mut Evaluator)
-> Result<Option<Row>> {
let tid = sel.get_table_info().get_table_id();
let columns = sel.get_table_info().get_columns();
let mut row = Row::new();
let handle = box_try!(datum::encode_value(&[Datum::I64(h)]));
for col in columns {
if col.get_pk_handle() {
row.mut_data().extend(handle.clone());
} else {
let raw_key = table::encode_column_key(tid, h, col.get_column_id());
let key = Key::from_raw(raw_key);
match try!(snap.get(&key)) {
None => return Err(box_err!("key {:?} does not exist", key)),
Some(bs) => row.mut_data().extend(bs),
}
}
}
row.set_handle(handle);
if !sel.has_field_where() {
return Ok(Some(row));
}
trace!("filtering row {:?}", row);
if !row.get_data().is_empty() {
let (datums, _) = box_try!(datum::decode(row.get_data()));
for (c, d) in columns.iter().zip(datums) {
eval.insert(c.get_column_id(), d);
}
}
let res = box_try!(eval.eval(sel.get_field_where()));
if let Datum::Null = res {
trace!("got null, skip.");
return Ok(None);
}
if box_try!(res.as_bool()) {
trace!("pass.");
return Ok(Some(row));
}
trace!("got false, skip.");
Ok(None)
}
fn get_rows_from_idx(snap: &SnapshotStore,
sel: &SelectRequest,
ranges: Vec<KeyRange>)
-> Result<Vec<Row>> {
let mut rows = vec![];
for r in ranges {
let part = try!(get_idx_row_from_range(snap, sel.get_index_info(), r));
rows.extend(part);
}
Ok(rows)
}
fn get_idx_row_from_range(snap: &SnapshotStore,
info: &IndexInfo,
mut r: KeyRange)
-> Result<Vec<Row>> | {
let mut rows = vec![];
let mut seek_key = r.take_start();
loop {
trace!("seek {:?}", seek_key);
let mut nk = try!(snap.scan(Key::from_raw(seek_key.clone()), 1));
if nk.is_empty() {
debug!("no more data to scan");
return Ok(rows);
}
let (key, value) = try!(nk.pop().unwrap());
if r.get_end() <= &key {
debug!("reach end key: {:?} >= {:?}", key, r.get_end());
return Ok(rows);
}
let mut datums = box_try!(table::decode_index_key(&key));
let handle = if datums.len() > info.get_columns().len() {
datums.pop().unwrap()
} else {
let h = box_try!((&*value).read_i64::<BigEndian>());
Datum::I64(h)
};
let data = box_try!(datum::encode_value(&datums));
let handle_data = box_try!(datum::encode_value(&[handle]));
let mut row = Row::new();
row.set_handle(handle_data);
row.set_data(data);
rows.push(row);
seek_key = prefix_next(&key);
}
} | identifier_body | |
coprocessor.rs | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::{result, error};
use std::thread::{self, JoinHandle};
use std::sync::mpsc::{self, Sender, Receiver};
use mio::Token;
use tipb::select::{self, SelectRequest, SelectResponse, Row};
use tipb::schema::IndexInfo;
use protobuf::{Message as PbMsg, RepeatedField};
use byteorder::{BigEndian, ReadBytesExt};
use storage::{Engine, SnapshotStore, engine, txn, mvcc};
use kvproto::kvrpcpb::{Context, LockInfo};
use kvproto::msgpb::{MessageType, Message};
use kvproto::coprocessor::{Request, Response, KeyRange};
use kvproto::errorpb;
use storage::Key;
use util::codec::{Datum, table, datum};
use util::xeval::Evaluator;
use server::{self, SendCh, Msg, ConnData};
pub const REQ_TYPE_SELECT: i64 = 101;
pub const REQ_TYPE_INDEX: i64 = 102;
const DEFAULT_ERROR_CODE: i32 = 1;
quick_error! {
#[derive(Debug)]
pub enum Error {
Region(err: errorpb::Error) {
description("region related failure")
display("region {:?}", err)
}
Locked(l: LockInfo) {
description("key is locked")
display("locked {:?}", l)
}
Other(err: Box<error::Error + Send + Sync>) {
from()
cause(err.as_ref())
description(err.description())
display("unknown error {:?}", err)
}
}
}
pub type Result<T> = result::Result<T, Error>;
impl From<engine::Error> for Error {
fn from(e: engine::Error) -> Error {
match e {
engine::Error::Request(e) => Error::Region(e),
_ => Error::Other(box e),
}
}
}
impl From<txn::Error> for Error {
fn from(e: txn::Error) -> Error {
match e {
txn::Error::Mvcc(mvcc::Error::KeyIsLocked { primary, ts, key }) => {
let mut info = LockInfo::new();
info.set_primary_lock(primary);
info.set_lock_version(ts);
info.set_key(key);
Error::Locked(info)
}
_ => Error::Other(box e),
}
}
}
pub struct RegionEndPoint {
tx: Sender<EndPointMessage>,
handle: Option<JoinHandle<()>>,
}
type ResponseHandler = Box<Fn(Response) -> ()>;
#[derive(Debug)]
enum EndPointMessage {
Job(Request, Token, u64),
Close,
}
fn msg_poller(engine: Arc<Box<Engine>>, rx: Receiver<EndPointMessage>, ch: SendCh) {
info!("EndPoint started.");
let end_point = SnapshotEndPoint::new(engine);
loop {
let msg = rx.recv();
if let Err(e) = msg {
error!("failed to receive job: {:?}", e);
break;
}
let msg = msg.unwrap();
debug!("recv req: {:?}", msg);
match msg {
EndPointMessage::Job(req, token, msg_id) => {
handle_request(req, ch.clone(), token, msg_id, &end_point)
}
EndPointMessage::Close => break,
}
}
info!("EndPoint closing.");
}
impl RegionEndPoint {
pub fn new(engine: Arc<Box<Engine>>, ch: SendCh) -> RegionEndPoint {
let (tx, rx) = mpsc::channel();
let builder = thread::Builder::new().name("EndPoint".to_owned());
let handle = builder.spawn(move || msg_poller(engine, rx, ch)).unwrap();
RegionEndPoint {
tx: tx,
handle: Some(handle),
}
}
pub fn on_request(&self, req: Request, token: Token, msg_id: u64) -> server::Result<()> {
box_try!(self.tx.send(EndPointMessage::Job(req, token, msg_id)));
Ok(())
}
pub fn stop(&mut self) {
if self.handle.is_none() {
return;
}
if let Err(e) = self.tx.send(EndPointMessage::Close) {
error!("failed to ask the coprocessor to stop: {:?}", e);
}
if let Err(e) = self.handle.take().unwrap().join() {
error!("failed to stop the coprocessor: {:?}", e);
}
}
}
fn handle_request(req: Request,
ch: SendCh,
token: Token,
msg_id: u64,
end_point: &SnapshotEndPoint) {
let cb = box move |r| {
let mut resp_msg = Message::new();
resp_msg.set_msg_type(MessageType::CopResp);
resp_msg.set_cop_resp(r);
if let Err(e) = ch.send(Msg::WriteData {
token: token,
data: ConnData::new(msg_id, resp_msg),
}) {
error!("send cop resp failed with token {:?}, msg id {}, err {:?}",
token,
msg_id,
e);
}
};
match req.get_tp() {
REQ_TYPE_SELECT | REQ_TYPE_INDEX => {
let mut sel = SelectRequest::new();
if let Err(e) = sel.merge_from_bytes(req.get_data()) {
on_error(box_err!(e), cb);
return;
}
match end_point.handle_select(req, sel) {
Ok(r) => cb(r),
Err(e) => on_error(e, cb),
}
}
t => on_error(box_err!("unsupported tp {}", t), cb),
}
}
fn on_error(e: Error, cb: ResponseHandler) {
let mut resp = Response::new();
match e {
Error::Region(e) => resp.set_region_error(e),
Error::Locked(info) => resp.set_locked(info),
Error::Other(_) => resp.set_other_error(format!("{}", e)),
}
cb(resp)
}
pub struct SnapshotEndPoint {
engine: Arc<Box<Engine>>,
}
impl SnapshotEndPoint {
pub fn new(engine: Arc<Box<Engine>>) -> SnapshotEndPoint {
// TODO: Spawn a new thread for handling requests asynchronously.
SnapshotEndPoint { engine: engine }
}
fn new_snapshot<'a>(&'a self, ctx: &Context, start_ts: u64) -> Result<SnapshotStore<'a>> {
let snapshot = try!(self.engine.snapshot(ctx));
Ok(SnapshotStore::new(snapshot, start_ts))
}
}
impl SnapshotEndPoint {
pub fn handle_select(&self, mut req: Request, sel: SelectRequest) -> Result<Response> {
let snap = try!(self.new_snapshot(req.get_context(), sel.get_start_ts()));
let range = req.take_ranges().into_vec();
debug!("scanning range: {:?}", range);
let res = if req.get_tp() == REQ_TYPE_SELECT {
get_rows_from_sel(&snap, &sel, range)
} else {
get_rows_from_idx(&snap, &sel, range)
};
let mut resp = Response::new();
let mut sel_resp = SelectResponse::new();
match res {
Ok(rows) => sel_resp.set_rows(RepeatedField::from_vec(rows)),
Err(e) => {
if let Error::Other(_) = e {
// should we handle locked here too?
sel_resp.set_error(to_pb_error(&e));
// TODO: add detailed error information
resp.set_other_error(format!("{}", e));
} else {
// Other errors should be handled by the TiDB client.
return Err(e);
}
}
}
let data = box_try!(sel_resp.write_to_bytes());
resp.set_data(data);
Ok(resp)
}
}
fn to_pb_error(err: &Error) -> select::Error {
let mut e = select::Error::new();
e.set_code(DEFAULT_ERROR_CODE);
e.set_msg(format!("{}", err));
e
}
fn get_rows_from_sel(snap: &SnapshotStore,
sel: &SelectRequest,
ranges: Vec<KeyRange>)
-> Result<Vec<Row>> {
let mut eval = Evaluator::default();
let mut rows = vec![];
for ran in ranges {
let ran_rows = try!(get_rows_from_range(snap, sel, ran, &mut eval));
rows.extend(ran_rows);
}
Ok(rows)
}
fn prefix_next(key: &[u8]) -> Vec<u8> {
let mut nk = key.to_vec();
if nk.is_empty() {
nk.push(0);
return nk;
}
let mut i = nk.len() - 1;
loop {
if nk[i] == 255 {
nk[i] = 0;
} else {
nk[i] += 1;
return nk;
}
if i == 0 {
nk = key.to_vec();
nk.push(0);
return nk;
}
i -= 1;
}
}
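// A quick illustration of the successor-key semantics above, using made-up
// byte strings: a trailing 0xff carries into the previous byte, and a key of
// all 0xff bytes is extended with a zero byte instead of overflowing.
#[cfg(test)]
mod prefix_next_test {
use super::*;
#[test]
fn test_prefix_next() {
assert_eq!(prefix_next(b"a"), b"b".to_vec());
assert_eq!(prefix_next(&[0x61, 0xff]), vec![0x62, 0x00]);
assert_eq!(prefix_next(&[0xff, 0xff]), vec![0xff, 0xff, 0x00]);
assert_eq!(prefix_next(b""), vec![0x00]);
}
}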
/// `is_point` checks if the key range represents a point.
fn is_point(range: &KeyRange) -> bool {
range.get_end() == &*prefix_next(range.get_start())
}
fn get_rows_from_range(snap: &SnapshotStore,
sel: &SelectRequest,
mut range: KeyRange,
eval: &mut Evaluator)
-> Result<Vec<Row>> {
let mut rows = vec![];
if is_point(&range) {
if let None = try!(snap.get(&Key::from_raw(range.get_start().to_vec()))) {
return Ok(rows);
}
let h = box_try!(table::decode_handle(range.get_start()));
if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) {
rows.push(row);
}
} else {
let mut seek_key = range.take_start();
loop {
trace!("seek {:?}", seek_key);
let mut res = try!(snap.scan(Key::from_raw(seek_key), 1));
if res.is_empty() {
debug!("no more data to scan.");
break;
}
let (key, _) = try!(res.pop().unwrap());
if range.get_end() <= &key {
debug!("reach end key: {:?} >= {:?}", key, range.get_end());
break;
}
let h = box_try!(table::decode_handle(&key));
if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) {
rows.push(row);
}
seek_key = prefix_next(&key);
}
}
Ok(rows)
}
fn get_row_by_handle(snap: &SnapshotStore,
sel: &SelectRequest,
h: i64,
eval: &mut Evaluator)
-> Result<Option<Row>> {
let tid = sel.get_table_info().get_table_id();
let columns = sel.get_table_info().get_columns();
let mut row = Row::new();
let handle = box_try!(datum::encode_value(&[Datum::I64(h)]));
for col in columns {
if col.get_pk_handle() {
row.mut_data().extend(handle.clone());
} else {
let raw_key = table::encode_column_key(tid, h, col.get_column_id());
let key = Key::from_raw(raw_key);
match try!(snap.get(&key)) {
None => return Err(box_err!("key {:?} does not exist", key)),
Some(bs) => row.mut_data().extend(bs),
}
}
}
row.set_handle(handle);
if !sel.has_field_where() {
return Ok(Some(row));
}
trace!("filtering row {:?}", row);
if !row.get_data().is_empty() {
let (datums, _) = box_try!(datum::decode(row.get_data()));
for (c, d) in columns.iter().zip(datums) {
eval.insert(c.get_column_id(), d);
}
}
let res = box_try!(eval.eval(sel.get_field_where()));
if let Datum::Null = res {
trace!("got null, skip.");
return Ok(None);
}
if box_try!(res.as_bool()) {
trace!("pass.");
return Ok(Some(row));
}
trace!("got false, skip.");
Ok(None)
}
fn get_rows_from_idx(snap: &SnapshotStore,
sel: &SelectRequest,
ranges: Vec<KeyRange>)
-> Result<Vec<Row>> {
let mut rows = vec![];
for r in ranges {
let part = try!(get_idx_row_from_range(snap, sel.get_index_info(), r));
rows.extend(part);
}
Ok(rows)
}
fn get_idx_row_from_range(snap: &SnapshotStore,
info: &IndexInfo,
mut r: KeyRange)
-> Result<Vec<Row>> {
let mut rows = vec![];
let mut seek_key = r.take_start();
loop {
trace!("seek {:?}", seek_key);
let mut nk = try!(snap.scan(Key::from_raw(seek_key.clone()), 1));
if nk.is_empty() {
debug!("no more data to scan");
return Ok(rows);
}
let (key, value) = try!(nk.pop().unwrap());
if r.get_end() <= &key {
debug!("reach end key: {:?} >= {:?}", key, r.get_end()); | } else {
let h = box_try!((&*value).read_i64::<BigEndian>());
Datum::I64(h)
};
let data = box_try!(datum::encode_value(&datums));
let handle_data = box_try!(datum::encode_value(&[handle]));
let mut row = Row::new();
row.set_handle(handle_data);
row.set_data(data);
rows.push(row);
seek_key = prefix_next(&key);
}
} | return Ok(rows);
}
let mut datums = box_try!(table::decode_index_key(&key));
let handle = if datums.len() > info.get_columns().len() {
datums.pop().unwrap() | random_line_split |
coprocessor.rs | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use std::{result, error};
use std::thread::{self, JoinHandle};
use std::sync::mpsc::{self, Sender, Receiver};
use mio::Token;
use tipb::select::{self, SelectRequest, SelectResponse, Row};
use tipb::schema::IndexInfo;
use protobuf::{Message as PbMsg, RepeatedField};
use byteorder::{BigEndian, ReadBytesExt};
use storage::{Engine, SnapshotStore, engine, txn, mvcc};
use kvproto::kvrpcpb::{Context, LockInfo};
use kvproto::msgpb::{MessageType, Message};
use kvproto::coprocessor::{Request, Response, KeyRange};
use kvproto::errorpb;
use storage::Key;
use util::codec::{Datum, table, datum};
use util::xeval::Evaluator;
use server::{self, SendCh, Msg, ConnData};
pub const REQ_TYPE_SELECT: i64 = 101;
pub const REQ_TYPE_INDEX: i64 = 102;
const DEFAULT_ERROR_CODE: i32 = 1;
quick_error! {
#[derive(Debug)]
pub enum Error {
Region(err: errorpb::Error) {
description("region related failure")
display("region {:?}", err)
}
Locked(l: LockInfo) {
description("key is locked")
display("locked {:?}", l)
}
Other(err: Box<error::Error + Send + Sync>) {
from()
cause(err.as_ref())
description(err.description())
display("unknown error {:?}", err)
}
}
}
pub type Result<T> = result::Result<T, Error>;
impl From<engine::Error> for Error {
fn from(e: engine::Error) -> Error {
match e {
engine::Error::Request(e) => Error::Region(e),
_ => Error::Other(box e),
}
}
}
impl From<txn::Error> for Error {
fn from(e: txn::Error) -> Error {
match e {
txn::Error::Mvcc(mvcc::Error::KeyIsLocked { primary, ts, key }) => {
let mut info = LockInfo::new();
info.set_primary_lock(primary);
info.set_lock_version(ts);
info.set_key(key);
Error::Locked(info)
}
_ => Error::Other(box e),
}
}
}
pub struct RegionEndPoint {
tx: Sender<EndPointMessage>,
handle: Option<JoinHandle<()>>,
}
type ResponseHandler = Box<Fn(Response) -> ()>;
#[derive(Debug)]
enum EndPointMessage {
Job(Request, Token, u64),
Close,
}
fn msg_poller(engine: Arc<Box<Engine>>, rx: Receiver<EndPointMessage>, ch: SendCh) {
info!("EndPoint started.");
let end_point = SnapshotEndPoint::new(engine);
loop {
let msg = rx.recv();
if let Err(e) = msg {
error!("failed to receive job: {:?}", e);
break;
}
let msg = msg.unwrap();
debug!("recv req: {:?}", msg);
match msg {
EndPointMessage::Job(req, token, msg_id) => {
handle_request(req, ch.clone(), token, msg_id, &end_point)
}
EndPointMessage::Close => break,
}
}
info!("EndPoint closing.");
}
impl RegionEndPoint {
pub fn new(engine: Arc<Box<Engine>>, ch: SendCh) -> RegionEndPoint {
let (tx, rx) = mpsc::channel();
let builder = thread::Builder::new().name("EndPoint".to_owned());
let handle = builder.spawn(move || msg_poller(engine, rx, ch)).unwrap();
RegionEndPoint {
tx: tx,
handle: Some(handle),
}
}
pub fn on_request(&self, req: Request, token: Token, msg_id: u64) -> server::Result<()> {
box_try!(self.tx.send(EndPointMessage::Job(req, token, msg_id)));
Ok(())
}
pub fn stop(&mut self) {
if self.handle.is_none() {
return;
}
if let Err(e) = self.tx.send(EndPointMessage::Close) {
error!("failed to ask the coprocessor to stop: {:?}", e);
}
if let Err(e) = self.handle.take().unwrap().join() {
error!("failed to stop the coprocessor: {:?}", e);
}
}
}
fn | (req: Request,
ch: SendCh,
token: Token,
msg_id: u64,
end_point: &SnapshotEndPoint) {
let cb = box move |r| {
let mut resp_msg = Message::new();
resp_msg.set_msg_type(MessageType::CopResp);
resp_msg.set_cop_resp(r);
if let Err(e) = ch.send(Msg::WriteData {
token: token,
data: ConnData::new(msg_id, resp_msg),
}) {
error!("send cop resp failed with token {:?}, msg id {}, err {:?}",
token,
msg_id,
e);
}
};
match req.get_tp() {
REQ_TYPE_SELECT | REQ_TYPE_INDEX => {
let mut sel = SelectRequest::new();
if let Err(e) = sel.merge_from_bytes(req.get_data()) {
on_error(box_err!(e), cb);
return;
}
match end_point.handle_select(req, sel) {
Ok(r) => cb(r),
Err(e) => on_error(e, cb),
}
}
t => on_error(box_err!("unsupported tp {}", t), cb),
}
}
fn on_error(e: Error, cb: ResponseHandler) {
let mut resp = Response::new();
match e {
Error::Region(e) => resp.set_region_error(e),
Error::Locked(info) => resp.set_locked(info),
Error::Other(_) => resp.set_other_error(format!("{}", e)),
}
cb(resp)
}
pub struct SnapshotEndPoint {
engine: Arc<Box<Engine>>,
}
impl SnapshotEndPoint {
pub fn new(engine: Arc<Box<Engine>>) -> SnapshotEndPoint {
// TODO: Spawn a new thread for handling requests asynchronously.
SnapshotEndPoint { engine: engine }
}
fn new_snapshot<'a>(&'a self, ctx: &Context, start_ts: u64) -> Result<SnapshotStore<'a>> {
let snapshot = try!(self.engine.snapshot(ctx));
Ok(SnapshotStore::new(snapshot, start_ts))
}
}
impl SnapshotEndPoint {
pub fn handle_select(&self, mut req: Request, sel: SelectRequest) -> Result<Response> {
let snap = try!(self.new_snapshot(req.get_context(), sel.get_start_ts()));
let range = req.take_ranges().into_vec();
debug!("scanning range: {:?}", range);
let res = if req.get_tp() == REQ_TYPE_SELECT {
get_rows_from_sel(&snap, &sel, range)
} else {
get_rows_from_idx(&snap, &sel, range)
};
let mut resp = Response::new();
let mut sel_resp = SelectResponse::new();
match res {
Ok(rows) => sel_resp.set_rows(RepeatedField::from_vec(rows)),
Err(e) => {
if let Error::Other(_) = e {
// should we handle locked here too?
sel_resp.set_error(to_pb_error(&e));
// TODO: add detailed error information
resp.set_other_error(format!("{}", e));
} else {
// Other errors should be handled by the TiDB client.
return Err(e);
}
}
}
let data = box_try!(sel_resp.write_to_bytes());
resp.set_data(data);
Ok(resp)
}
}
fn to_pb_error(err: &Error) -> select::Error {
let mut e = select::Error::new();
e.set_code(DEFAULT_ERROR_CODE);
e.set_msg(format!("{}", err));
e
}
fn get_rows_from_sel(snap: &SnapshotStore,
sel: &SelectRequest,
ranges: Vec<KeyRange>)
-> Result<Vec<Row>> {
let mut eval = Evaluator::default();
let mut rows = vec![];
for ran in ranges {
let ran_rows = try!(get_rows_from_range(snap, sel, ran, &mut eval));
rows.extend(ran_rows);
}
Ok(rows)
}
fn prefix_next(key: &[u8]) -> Vec<u8> {
let mut nk = key.to_vec();
if nk.is_empty() {
nk.push(0);
return nk;
}
let mut i = nk.len() - 1;
loop {
if nk[i] == 255 {
nk[i] = 0;
} else {
nk[i] += 1;
return nk;
}
if i == 0 {
nk = key.to_vec();
nk.push(0);
return nk;
}
i -= 1;
}
}
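// A quick illustration of the successor-key semantics above, using made-up
// byte strings: a trailing 0xff carries into the previous byte, and a key of
// all 0xff bytes is extended with a zero byte instead of overflowing.
#[cfg(test)]
mod prefix_next_test {
use super::*;
#[test]
fn test_prefix_next() {
assert_eq!(prefix_next(b"a"), b"b".to_vec());
assert_eq!(prefix_next(&[0x61, 0xff]), vec![0x62, 0x00]);
assert_eq!(prefix_next(&[0xff, 0xff]), vec![0xff, 0xff, 0x00]);
assert_eq!(prefix_next(b""), vec![0x00]);
}
}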
/// `is_point` checks if the key range represents a point.
fn is_point(range: &KeyRange) -> bool {
range.get_end() == &*prefix_next(range.get_start())
}
fn get_rows_from_range(snap: &SnapshotStore,
sel: &SelectRequest,
mut range: KeyRange,
eval: &mut Evaluator)
-> Result<Vec<Row>> {
let mut rows = vec![];
if is_point(&range) {
if let None = try!(snap.get(&Key::from_raw(range.get_start().to_vec()))) {
return Ok(rows);
}
let h = box_try!(table::decode_handle(range.get_start()));
if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) {
rows.push(row);
}
} else {
let mut seek_key = range.take_start();
loop {
trace!("seek {:?}", seek_key);
let mut res = try!(snap.scan(Key::from_raw(seek_key), 1));
if res.is_empty() {
debug!("no more data to scan.");
break;
}
let (key, _) = try!(res.pop().unwrap());
if range.get_end() <= &key {
debug!("reach end key: {:?} >= {:?}", key, range.get_end());
break;
}
let h = box_try!(table::decode_handle(&key));
if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) {
rows.push(row);
}
seek_key = prefix_next(&key);
}
}
Ok(rows)
}
fn get_row_by_handle(snap: &SnapshotStore,
sel: &SelectRequest,
h: i64,
eval: &mut Evaluator)
-> Result<Option<Row>> {
let tid = sel.get_table_info().get_table_id();
let columns = sel.get_table_info().get_columns();
let mut row = Row::new();
let handle = box_try!(datum::encode_value(&[Datum::I64(h)]));
for col in columns {
if col.get_pk_handle() {
row.mut_data().extend(handle.clone());
} else {
let raw_key = table::encode_column_key(tid, h, col.get_column_id());
let key = Key::from_raw(raw_key);
match try!(snap.get(&key)) {
None => return Err(box_err!("key {:?} does not exist", key)),
Some(bs) => row.mut_data().extend(bs),
}
}
}
row.set_handle(handle);
if !sel.has_field_where() {
return Ok(Some(row));
}
trace!("filtering row {:?}", row);
if !row.get_data().is_empty() {
let (datums, _) = box_try!(datum::decode(row.get_data()));
for (c, d) in columns.iter().zip(datums) {
eval.insert(c.get_column_id(), d);
}
}
let res = box_try!(eval.eval(sel.get_field_where()));
if let Datum::Null = res {
trace!("got null, skip.");
return Ok(None);
}
if box_try!(res.as_bool()) {
trace!("pass.");
return Ok(Some(row));
}
trace!("got false, skip.");
Ok(None)
}
fn get_rows_from_idx(snap: &SnapshotStore,
sel: &SelectRequest,
ranges: Vec<KeyRange>)
-> Result<Vec<Row>> {
let mut rows = vec![];
for r in ranges {
let part = try!(get_idx_row_from_range(snap, sel.get_index_info(), r));
rows.extend(part);
}
Ok(rows)
}
fn get_idx_row_from_range(snap: &SnapshotStore,
info: &IndexInfo,
mut r: KeyRange)
-> Result<Vec<Row>> {
let mut rows = vec![];
let mut seek_key = r.take_start();
loop {
trace!("seek {:?}", seek_key);
let mut nk = try!(snap.scan(Key::from_raw(seek_key.clone()), 1));
if nk.is_empty() {
debug!("no more data to scan");
return Ok(rows);
}
let (key, value) = try!(nk.pop().unwrap());
if r.get_end() <= &key {
debug!("reach end key: {:?} >= {:?}", key, r.get_end());
return Ok(rows);
}
let mut datums = box_try!(table::decode_index_key(&key));
let handle = if datums.len() > info.get_columns().len() {
datums.pop().unwrap()
} else {
let h = box_try!((&*value).read_i64::<BigEndian>());
Datum::I64(h)
};
let data = box_try!(datum::encode_value(&datums));
let handle_data = box_try!(datum::encode_value(&[handle]));
let mut row = Row::new();
row.set_handle(handle_data);
row.set_data(data);
rows.push(row);
seek_key = prefix_next(&key);
}
}
| handle_request | identifier_name |
ReviewIntangibleAssets.js | // Asset basic information maintenance list
// All element selectors
var selector = {
$grid: function () { return $("#jqxTable") },
$btnSearch: function () { return $("#btnSearch") },
$btnReset: function () { return $("#btnReset") },
$EditPermission: function () { return $("#EditPermission") }
}; //selector end
var isEdit = false;
var vguid = "";
var $page = function () {
this.init = function () {
addEvent();
var arr = [];
var d = new Date;
d.setMonth(d.getMonth() + 1);
for (var i = 0; i < 3; i++) {
debugger;
| ength == 0) {
arr.push("Default");
}
var dataAdapter = new $.jqx.dataAdapter(arr);
$("#SubmitYearMonth").jqxComboBox({ selectedIndex: 0, source: dataAdapter, width: 198, height: 33 });
$("#SubmitYearMonth").jqxComboBox({ itemHeight: 33 });
$("#SubmitYearMonth input").click(function () {
$("#SubmitYearMonth").jqxComboBox('clearSelection');
})
$("#dropdownlistWrapperSubmitYearMonth Input")[0].style.paddingLeft = "10px";
}
// All events
function addEvent() {
// Load the list data
initTable();
selector.$btnSearch().unbind("click").on("click", function () {
initTable();
});
// Reset button event
selector.$btnReset().on("click", function () {
$("#OSNO").val("");
});
$("#CreditDialog_OKBtn").on("click",
function () {
$("#CreditDialog").modal("hide");
}
);
// Submit after the form is filled in: call the clearing platform's pending-payment API to generate a payment voucher
// Call the API first; only submit once it succeeds
$("#btnSubmit").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checedBoxs = grid.find(".jqx_datatable_checkbox:checked");
checedBoxs.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
$("#SubmitAssetReviewDialog").modal("show");
}
});
$("#SubmitAssetReviewDialog_OKBtn").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checedBoxs = grid.find(".jqx_datatable_checkbox:checked");
checedBoxs.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
SubmitTaxFeeOrder(selection);
}
});
$("#SubmitAssetReviewDialog_CancelBtn").on("click",
function () {
$("#SubmitAssetReviewDialog").modal("hide");
}
);
}; //addEvent end
function SubmitTaxFeeOrder(selection) {
$.ajax({
url: "/AssetManagement/ReviewIntangibleAssets/SubmitIntangibleAssets",
data: { vguids: selection },
type: "post",
success: function (msg) {
switch (msg.Status) {
case "0":
jqxNotification("提交失败!", null, "error");
break;
case "1":
jqxNotification("提交成功!", null, "success");
document.getElementById('ifrPrint').src = msg.ResultInfo;
$("#CreditDialog").modal("show");
$("#jqxTable").jqxDataTable('updateBoundData');
break;
case "2":
jqxNotification(msg.ResultInfo, null, "error");
break;
}
}
});
}
function initTable() {
//var DateEnd = $("#TransactionDateEnd").val(); "AccountingPeriod": $("#AccountingPeriod").val("")
var source =
{
datafields:
[
{ name: "checkbox", type: null },
{ name: 'VGUID', type: 'string' },
{ name: 'OrderNumber', type: 'string' },
{ name: 'PayItem', type: 'string' },
{ name: 'VehicleModel', type: 'string' },
{ name: 'PaymentInformation', type: 'string' },
{ name: 'OrderQuantity', type: 'number' },
{ name: 'UnitPrice', type: 'float' },
{ name: 'SumPayment', type: 'float' },
{ name: 'PurchaseDescription', type: 'string' },
{ name: 'PaymentDate', type: 'date' },
{ name: 'BankStatus', type: 'string' },
{ name: 'ContractName', type: 'string' },
{ name: 'ContractFilePath', type: 'string' },
{ name: 'PayType', type: 'string' },
{ name: 'PayCompany', type: 'string' },
{ name: 'OSNO', type: 'string' },
{ name: 'SubmitStatus', type: 'number' },
{ name: 'PaymentVoucherVguid', type: 'string' },
{ name: 'CreateDate', type: 'date' },
{ name: 'ChangeDate', type: 'date' },
{ name: 'CreateUser', type: 'string' },
{ name: 'ChangeUser', type: 'string' }
],
datatype: "json",
id: "VGUID",
data: { "OSNO": $("#OSNO").val()},
url: "/AssetManagement/ReviewIntangibleAssets/GetIntangibleAssetsOrderListDatas" //获取数据源的路径
};
var typeAdapter = new $.jqx.dataAdapter(source, {
downloadComplete: function (data) {
source.totalrecords = data.TotalRows;
}
});
// Create the card info list (main table)
selector.$grid().jqxDataTable(
{
pageable: true,
width: "100%",
height: 400,
pageSize: 10,
serverProcessing: true,
pagerButtonsCount: 10,
source: typeAdapter,
theme: "office",
columnsHeight: 40,
columns: [
{ text: "", datafield: "checkbox", width: 35, pinned: true, align: 'center', cellsAlign: 'center', cellsRenderer: cellsRendererFunc, renderer: rendererFunc, rendered: renderedFunc, autoRowHeight: false },
{ text: '采购编号', datafield: 'OrderNumber', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '订单编号', datafield: 'OSNO', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PayItem', width: 300, align: 'center', cellsAlign: 'center' },
{ text: '供应商名称', datafield: 'PaymentInformation', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '数量', datafield: 'OrderQuantity', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '单价', datafield: 'UnitPrice', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '合同金额', datafield: 'SumPayment', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '采购说明', datafield: 'PurchaseDescription', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款方式', datafield: 'PayType', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款公司', datafield: 'PayCompany', width: 200, align: 'center', cellsAlign: 'center' },
{ text: '创建时间', datafield: 'CreateDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '创建人', datafield: 'CreateUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '修改时间', datafield: 'ChangeDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '修改人', datafield: 'ChangeUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PaymentVoucherVguid', width: 300, align: 'center', hidden: true, cellsAlign: 'center' },
{ text: 'VGUID', datafield: 'VGUID', hidden: true }
]
});
}
function cellsRendererFunc(row, column, value, rowData) {
return "<input class=\"jqx_datatable_checkbox\" index=\"" + row + "\" type=\"checkbox\" style=\"margin:auto;width: 17px;height: 17px;\" />";
}
function rendererFunc() {
var checkBox = "<div id='jqx_datatable_checkbox_all' class='jqx_datatable_checkbox_all' style='z-index: 999; margin-left:7px ;margin-top: 7px;'>";
checkBox += "</div>";
return checkBox;
}
function renderedFunc(element) {
var grid = selector.$grid();
element.jqxCheckBox();
element.on('change', function (event) {
var checked = element.jqxCheckBox('checked');
if (checked) {
var rows = grid.jqxDataTable('getRows');
for (var i = 0; i < rows.length; i++) {
grid.jqxDataTable('selectRow', i);
grid.find(".jqx_datatable_checkbox").attr("checked", "checked")
}
} else {
grid.jqxDataTable('clearSelection');
grid.find(".jqx_datatable_checkbox").removeAttr("checked", "checked")
}
});
return true;
}
};
$(function () {
var page = new $page();
page.init();
});
| var m = d.getMonth() - i;
var y = d.getFullYear();
if (m <= 0) {
m = m + 12;
y = y - 1;
}
m = (m < 10 ? "0" + m : m);
arr.push(y.toString() + "-" + m.toString());
}
debugger;
if (arr.l | conditional_block |
ReviewIntangibleAssets.js | // Asset basic information maintenance list
// All element selectors
var selector = {
$grid: function () { return $("#jqxTable") },
$btnSearch: function () { return $("#btnSearch") },
$btnReset: function () { return $("#btnReset") },
$EditPermission: function () { return $("#EditPermission") }
}; //selector end
var isEdit = false;
var vguid = "";
var $page = function () {
this.init = function () {
addEvent();
var arr = [];
var d = new Date;
d.setMonth(d.getMonth() + 1);
for (var i = 0; i < 3; i++) {
debugger;
var m = d.getMonth() - i;
var y = d.getFullYear();
if (m <= 0) {
m = m + 12;
y = y - 1;
}
m = (m < 10 ? "0" + m : m);
arr.push(y.toString() + "-" + m.toString());
}
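// The loop above collects the three most recent "YYYY-MM" strings for the
// combo box. A standalone sketch of the same date arithmetic (hypothetical
// helper, not referenced elsewhere on this page); shifting the 0-based
// getMonth() up by one first makes m the 1-based current month when i is 0:
function buildRecentMonths(count) {
var result = [];
var d = new Date();
d.setMonth(d.getMonth() + 1);
for (var i = 0; i < count; i++) {
var m = d.getMonth() - i;
var y = d.getFullYear();
if (m <= 0) { m = m + 12; y = y - 1; }
result.push(y.toString() + "-" + (m < 10 ? "0" + m : m));
}
return result;
}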
debugger;
if (arr.length == 0) {
arr.push("Default");
}
var dataAdapter = new $.jqx.dataAdapter(arr);
$("#SubmitYearMonth").jqxComboBox({ selectedIndex: 0, source: dataAdapter, width: 198, height: 33 });
$("#SubmitYearMonth").jqxComboBox({ itemHeight: 33 });
$("#SubmitYearMonth input").click(function () {
$("#SubmitYearMonth").jqxComboBox('clearSelection');
})
$("#dropdownlistWrapperSubmitYearMonth Input")[0].style.paddingLeft = "10px";
}
// All events
function addEvent() {
// Load the list data
initTable();
selector.$btnSearch().unbind("click").on("click", function () {
initTable();
});
// Reset button event
selector.$btnReset().on("click", function () {
$("#OSNO").val("");
});
$("#CreditDialog_OKBtn").on("click",
function () {
$("#CreditDialog").modal("hide");
}
);
// Submit after the form is filled in: call the clearing platform's pending-payment API to generate a payment voucher
// Call the API first; only submit once it succeeds
$("#btnSubmit").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checedBoxs = grid.find(".jqx_datatable_checkbox:checked");
checedBoxs.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
$("#SubmitAssetReviewDialog").modal("show");
}
});
$("#SubmitAssetReviewDialog_OKBtn").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checedBoxs = grid.find(".jqx_datatable_checkbox:checked");
checedBoxs.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
SubmitTaxFeeOrder(selection);
}
});
$("#SubmitAssetReviewDialog_CancelBtn").on("click",
function () {
$("#SubmitAssetReviewDialog").modal("hide");
}
);
}; //addEvent end
function SubmitTaxFeeOrder(selection) {
$.ajax({
url: "/AssetManagement/ReviewIntangibleAssets/SubmitIntangibleAssets",
data: { vguids: selection },
type: "post",
success: function (msg) {
switch (msg.Status) {
case "0":
jqxNotification("提交失败!", null, "error");
break;
case "1":
jqxNotification("提交成功!", null, "success");
document.getElementById('ifrPrint').src = msg.ResultInfo;
$("#CreditDialog").modal("show");
$("#jqxTable").jqxDataTable('updateBoundData');
break;
case "2":
jqxNotification(msg.ResultInfo, null, "error");
break;
}
}
});
}
function initTable() {
//var DateEnd = $("#TransactionDateEnd").val(); "AccountingPeriod": $("#AccountingPeriod").val("")
var source =
{
datafields:
[
{ name: "checkbox", type: null },
{ name: 'VGUID', type: 'string' },
{ name: 'OrderNumber', type: 'string' },
{ name: 'PayItem', type: 'string' },
{ name: 'VehicleModel', type: 'string' },
{ name: 'PaymentInformation', type: 'string' },
{ name: 'OrderQuantity', type: 'number' },
{ name: 'UnitPrice', type: 'float' },
{ name: 'SumPayment', type: 'float' },
{ name: 'PurchaseDescription', type: 'string' },
{ name: 'PaymentDate', type: 'date' },
{ name: 'BankStatus', type: 'string' },
{ name: 'ContractName', type: 'string' },
{ name: 'ContractFilePath', type: 'string' },
{ name: 'PayType', type: 'string' },
{ name: 'PayCompany', type: 'string' },
{ name: 'OSNO', type: 'string' },
{ name: 'SubmitStatus', type: 'number' },
{ name: 'PaymentVoucherVguid', type: 'string' },
{ name: 'CreateDate', type: 'date' },
{ name: 'ChangeDate', type: 'date' },
{ name: 'CreateUser', type: 'string' },
{ name: 'ChangeUser', type: 'string' }
],
datatype: "json",
id: "VGUID",
data: { "OSNO": $("#OSNO").val()},
url: "/AssetManagement/ReviewIntangibleAssets/GetIntangibleAssetsOrderListDatas" //获取数据源的路径
};
var typeAdapter = new $.jqx.dataAdapter(source, {
downloadComplete: function (data) {
source.totalrecords = data.TotalRows;
}
});
// Create the card info list (main table)
selector.$grid().jqxDataTable(
{
pageable: true,
width: "100%",
height: 400,
pageSize: 10,
serverProcessing: true,
pagerButtonsCount: 10,
source: typeAdapter,
theme: "office",
columnsHeight: 40,
columns: [
{ text: "", datafield: "checkbox", width: 35, pinned: true, align: 'center', cellsAlign: 'center', cellsRenderer: cellsRendererFunc, renderer: rendererFunc, rendered: renderedFunc, autoRowHeight: false },
{ text: '采购编号', datafield: 'OrderNumber', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '订单编号', datafield: 'OSNO', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PayItem', width: 300, align: 'center', cellsAlign: 'center' },
{ text: '供应商名称', datafield: 'PaymentInformation', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '数量', datafield: 'OrderQuantity', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '单价', datafield: 'UnitPrice', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '合同金额', datafield: 'SumPayment', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '采购说明', datafield: 'PurchaseDescription', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款方式', datafield: 'PayType', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款公司', datafield: 'PayCompany', width: 200, align: 'center', cellsAlign: 'center' },
{ text: '创建时间', datafield: 'CreateDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '创建人', datafield: 'CreateUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '修改时间', datafield: 'ChangeDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '修改人', datafield: 'ChangeUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PaymentVoucherVguid', width: 300, align: 'center', hidden: true, cellsAlign: 'center' },
{ text: 'VGUID', datafield: 'VGUID', hidden: true }
]
});
}
function cellsRendererFunc(row, column, value, rowData) {
return "<input class=\"jqx_datatable_checkbox\" index=\"" + row + "\" type=\"checkbox\" style=\"margin:auto;width: 17px;height: 17px;\" />";
}
function rendererFunc() {
var checkBox = "<div id='jqx_datatable_checkbox_all' class='jqx_datatable_checkbox_all' styl | margin-left:7px ;margin-top: 7px;'>";
checkBox += "</div>";
return checkBox;
}
function renderedFunc(element) {
var grid = selector.$grid();
element.jqxCheckBox();
element.on('change', function (event) {
var checked = element.jqxCheckBox('checked');
if (checked) {
var rows = grid.jqxDataTable('getRows');
for (var i = 0; i < rows.length; i++) {
grid.jqxDataTable('selectRow', i);
grid.find(".jqx_datatable_checkbox").attr("checked", "checked")
}
} else {
grid.jqxDataTable('clearSelection');
grid.find(".jqx_datatable_checkbox").removeAttr("checked", "checked")
}
});
return true;
}
};
$(function () {
var page = new $page();
page.init();
});
ReviewIntangibleAssets.js
// Asset basic information maintenance list
// Selectors for all page elements
var selector = {
$grid: function () { return $("#jqxTable") },
$btnSearch: function () { return $("#btnSearch") },
$btnReset: function () { return $("#btnReset") },
$EditPermission: function () { return $("#EditPermission") }
}; //selector end
var isEdit = false;
var vguid = "";
var $page = function () {
this.init = function () {
addEvent();
var arr = [];
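// Build the three most recent months as "YYYY-MM" strings for the combo box.
// getMonth() is 0-based, so stepping the date one month ahead first makes
// getMonth() yield the current month's 1-based number; the loop then walks
// back over the previous two months, borrowing from the year when needed.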
var d = new Date;
d.setMonth(d.getMonth() + 1);
for (var i = 0; i < 3; i++) {
var m = d.getMonth() - i;
var y = d.getFullYear();
if (m <= 0) {
m = m + 12;
y = y - 1;
}
m = (m < 10 ? "0" + m : m);
arr.push(y.toString() + "-" + m.toString());
}
if (arr.length == 0) {
arr.push("Default");
}
var dataAdapter = new $.jqx.dataAdapter(arr);
$("#SubmitYearMonth").jqxComboBox({ selectedIndex: 0, source: dataAdapter, width: 198, height: 33 });
$("#SubmitYearMonth").jqxComboBox({ itemHeight: 33 });
$("#SubmitYearMonth input").click(function () {
$("#SubmitYearMonth").jqxComboBox('clearSelection');
})
$("#dropdownlistWrapperSubmitYearMonth Input")[0].style.paddingLeft = "10px";
}
// All event bindings
function addEvent() {
// Load the list data
initTable();
selector.$btnSearch().unbind("click").on("click", function () {
initTable();
});
// Reset button click event
selector.$btnReset().on("click", function () {
$("#OSNO").val("");
});
$("#CreditDialog_OKBtn").on("click",
function () {
$("#CreditDialog").modal("hide");
}
);
// After the form is filled in, submit: call the clearing platform's pending-payment API to generate a payment voucher
// Call the API first; submit only after it succeeds
$("#btnSubmit").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checkedBoxes = grid.find(".jqx_datatable_checkbox:checked");
checkedBoxes.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
$("#SubmitAssetReviewDialog").modal("show");
}
});
$("#SubmitAssetReviewDialog_OKBtn").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checkedBoxes = grid.find(".jqx_datatable_checkbox:checked");
checkedBoxes.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
SubmitTaxFeeOrder(selection);
}
});
$("#SubmitAssetReviewDialog_CancelBtn").on("click",
function () {
$("#SubmitAssetReviewDialog").modal("hide");
}
);
}; //addEvent end
function SubmitTaxFeeOrder(selection) {
$.ajax({
url: "/AssetManagement/ReviewIntangibleAssets/SubmitIntangibleAssets",
data: { vguids: selection },
type: "post",
success: function (msg) {
switch (msg.Status) {
case "0":
jqxNotification("提交失败!", null, "error");
break;
case "1":
jqxNotification("提交成功!", null, "success");
document.getElementById('ifrPrint').src = msg.ResultInfo;
$("#CreditDialog").modal("show");
$("#jqxTable").jqxDataTable('updateBoundData');
break;
case "2":
jqxNotification(msg.ResultInfo, null, "error");
break;
}
}
});
}
function initTable() {
//var DateEnd = $("#TransactionDateEnd").val(); "AccountingPeriod": $("#AccountingPeriod").val("")
var source =
{
datafields:
[
{ name: "checkbox", type: null },
{ name: 'VGUID', type: 'string' },
{ name: 'OrderNumber', type: 'string' },
{ name: 'PayItem', type: 'string' },
{ name: 'VehicleModel', type: 'string' },
{ name: 'PaymentInformation', type: 'string' },
{ name: 'OrderQuantity', type: 'number' },
{ name: 'UnitPrice', type: 'float' },
{ name: 'SumPayment', type: 'float' },
{ name: 'PurchaseDescription', type: 'string' },
{ name: 'PaymentDate', type: 'date' },
{ name: 'BankStatus', type: 'string' },
{ name: 'ContractName', type: 'string' },
{ name: 'ContractFilePath', type: 'string' },
{ name: 'PayType', type: 'string' },
{ name: 'PayCompany', type: 'string' },
{ name: 'OSNO', type: 'string' },
{ name: 'SubmitStatus', type: 'number' },
{ name: 'PaymentVoucherVguid', type: 'string' },
{ name: 'CreateDate', type: 'date' },
{ name: 'ChangeDate', type: 'date' },
{ name: 'CreateUser', type: 'string' },
{ name: 'ChangeUser', type: 'string' }
],
datatype: "json",
id: "VGUID",
data: { "OSNO": $("#OSNO").val()},
url: "/AssetManagement/ReviewIntangibleAssets/GetIntangibleAssetsOrderListDatas" //获取数据源的路径
};
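// serverProcessing is enabled on the grid below, so totalrecords has to be
// filled in from the server's TotalRows for paging to work correctly.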
var typeAdapter = new $.jqx.dataAdapter(source, {
downloadComplete: function (data) {
source.totalrecords = data.TotalRows;
}
});
// Create the card information list (master table)
selector.$grid().jqxDataTable(
{
pageable: true,
width: "100%",
height: 400,
pageSize: 10,
serverProcessing: true,
pagerButtonsCount: 10,
source: typeAdapter,
theme: "office",
columnsHeight: 40,
columns: [
{ text: "", datafield: "checkbox", width: 35, pinned: true, align: 'center', cellsAlign: 'center', cellsRenderer: cellsRendererFunc, renderer: rendererFunc, rendered: renderedFunc, autoRowHeight: false },
{ text: '采购编号', datafield: 'OrderNumber', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '订单编号', datafield: 'OSNO', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PayItem', width: 300, align: 'center', cellsAlign: 'center' },
{ text: '供应商名称', datafield: 'PaymentInformation', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '数量', datafield: 'OrderQuantity', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '单价', datafield: 'UnitPrice', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '合同金额', datafield: 'SumPayment', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '采购说明', datafield: 'PurchaseDescription', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款方式', datafield: 'PayType', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款公司', datafield: 'PayCompany', width: 200, align: 'center', cellsAlign: 'center' },
{ text: '创建时间', datafield: 'CreateDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '创建人', datafield: 'CreateUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '修改时间', datafield: 'ChangeDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '修改人', datafield: 'ChangeUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PaymentVoucherVguid', width: 300, align: 'center', hidden: true, cellsAlign: 'center' },
{ text: 'VGUID', datafield: 'VGUID', hidden: true }
]
});
}
function cellsRendererFunc(row, column, value, rowData) {
return "<input class=\"jqx_datatable_checkbox\" index=\"" + row + "\" type=\"checkbox\" style=\"margin:auto;width: 17px;height: 17px;\" />";
}
function rendererFunc() {
var checkBox = "<div id='jqx_datatable_checkbox_all' class='jqx_datatable_checkbox_all' style='z-index: 999; margin-left:7px ;margin-top: 7px;'>";
checkBox += "</div>";
return checkBox;
}
function renderedFunc(element) {
var grid = selector.$grid();
element.jqxCheckBox();
element.on('change', function (event) {
var checked = element.jqxCheckBox('checked');
if (checked) {
var rows = grid.jqxDataTable('getRows');
for (var i = 0; i < rows.length; i++) {
grid.jqxDataTable('selectRow', i);
grid.find(".jqx_datatable_checkbox").attr("checked", "checked")
}
} else {
grid.jqxDataTable('clearSelection');
grid.find(".jqx_datatable_checkbox").removeAttr("checked", "checked")
}
});
return true;
}
};
$(function () {
var page = new $page();
page.init();
});
ReviewIntangibleAssets.js
// Asset basic information maintenance list
// Selectors for all page elements
var selector = {
$grid: function () { return $("#jqxTable") },
$btnSearch: function () { return $("#btnSearch") },
$btnReset: function () { return $("#btnReset") },
$EditPermission: function () { return $("#EditPermission") }
}; //selector end
var isEdit = false;
var vguid = "";
var $page = function () {
this.init = function () {
addEvent();
var arr = [];
var d = new Date;
d.setMonth(d.getMonth() + 1);
for (var i = 0; i < 3; i++) {
var m = d.getMonth() - i;
var y = d.getFullYear();
if (m <= 0) {
m = m + 12;
y = y - 1;
}
m = (m < 10 ? "0" + m : m);
arr.push(y.toString() + "-" + m.toString());
}
if (arr.length == 0) {
arr.push("Default");
}
var dataAdapter = new $.jqx.dataAdapter(arr);
$("#SubmitYearMonth").jqxComboBox({ selectedIndex: 0, source: dataAdapter, width: 198, height: 33 });
$("#SubmitYearMonth").jqxComboBox({ itemHeight: 33 });
$("#SubmitYearMonth input").click(function () {
$("#SubmitYearMonth").jqxComboBox('clearSelection');
})
$("#dropdownlistWrapperSubmitYearMonth Input")[0].style.paddingLeft = "10px";
}
// All event bindings
function addEvent() {
// Load the list data
initTable();
selector.$btnSearch().unbind("click").on("click", function () {
initTable();
});
// Reset button click event
selector.$btnReset().on("click", function () {
$("#OSNO").val("");
});
$("#CreditDialog_OKBtn").on("click",
function () {
$("#CreditDialog").modal("hide");
}
);
// After the form is filled in, submit: call the clearing platform's pending-payment API to generate a payment voucher
// Call the API first; submit only after it succeeds
$("#btnSubmit").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checkedBoxes = grid.find(".jqx_datatable_checkbox:checked");
checkedBoxes.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
$("#SubmitAssetReviewDialog").modal("show");
}
});
$("#SubmitAssetReviewDialog_OKBtn").on("click", function () {
var selection = [];
var grid = $("#jqxTable");
var checkedBoxes = grid.find(".jqx_datatable_checkbox:checked");
checkedBoxes.each(function () {
var th = $(this);
if (th.is(":checked")) {
var index = th.attr("index");
var data = grid.jqxDataTable('getRows')[index];
selection.push(data.VGUID);
}
});
if (selection.length < 1) {
jqxNotification("请选择数据!", null, "error");
} else {
SubmitTaxFeeOrder(selection);
}
});
$("#SubmitAssetReviewDialog_CancelBtn").on("click",
function () {
$("#SubmitAssetReviewDialog").modal("hide");
}
);
}; //addEvent end
function SubmitTaxFeeOrder(selection) {
$.ajax({
url: "/AssetManagement/ReviewIntangibleAssets/SubmitIntangibleAssets",
data: { vguids: selection },
type: "post",
success: function (msg) {
switch (msg.Status) {
case "0":
jqxNotification("提交失败!", null, "error");
break;
case "1":
jqxNotification("提交成功!", null, "success");
document.getElementById('ifrPrint').src = msg.ResultInfo;
$("#CreditDialog").modal("show");
$("#jqxTable").jqxDataTable('updateBoundData');
break;
case "2":
jqxNotification(msg.ResultInfo, null, "error");
break;
}
}
});
}
function initTable() {
//var DateEnd = $("#TransactionDateEnd").val(); "AccountingPeriod": $("#AccountingPeriod").val("")
var source =
{
datafields:
[
{ name: "checkbox", type: null },
{ name: 'VGUID', type: 'string' },
{ name: 'OrderNumber', type: 'string' },
{ name: 'PayItem', type: 'string' },
{ name: 'VehicleModel', type: 'string' },
{ name: 'PaymentInformation', type: 'string' },
{ name: 'OrderQuantity', type: 'number' },
{ name: 'UnitPrice', type: 'float' },
{ name: 'SumPayment', type: 'float' },
{ name: 'PurchaseDescription', type: 'string' },
{ name: 'PaymentDate', type: 'date' },
{ name: 'BankStatus', type: 'string' },
{ name: 'ContractName', type: 'string' },
{ name: 'ContractFilePath', type: 'string' },
{ name: 'PayType', type: 'string' },
{ name: 'PayCompany', type: 'string' },
{ name: 'OSNO', type: 'string' },
{ name: 'SubmitStatus', type: 'number' },
{ name: 'PaymentVoucherVguid', type: 'string' },
{ name: 'CreateDate', type: 'date' },
{ name: 'ChangeDate', type: 'date' },
{ name: 'CreateUser', type: 'string' },
{ name: 'ChangeUser', type: 'string' }
],
datatype: "json",
id: "VGUID",
data: { "OSNO": $("#OSNO").val()},
url: "/AssetManagement/ReviewIntangibleAssets/GetIntangibleAssetsOrderListDatas" //获取数据源的路径
};
var typeAdapter = new $.jqx.dataAdapter(source, {
downloadComplete: function (data) {
source.totalrecords = data.TotalRows;
}
});
// Create the card information list (master table)
selector.$grid().jqxDataTable(
{
pageable: true,
width: "100%",
height: 400,
pageSize: 10,
serverProcessing: true,
pagerButtonsCount: 10,
source: typeAdapter,
theme: "office",
columnsHeight: 40,
columns: [
{ text: "", datafield: "checkbox", width: 35, pinned: true, align: 'center', cellsAlign: 'center', cellsRenderer: cellsRendererFunc, renderer: rendererFunc, rendered: renderedFunc, autoRowHeight: false },
{ text: '采购编号', datafield: 'OrderNumber', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '订单编号', datafield: 'OSNO', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PayItem', width: 300, align: 'center', cellsAlign: 'center' },
{ text: '供应商名称', datafield: 'PaymentInformation', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '数量', datafield: 'OrderQuantity', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '单价', datafield: 'UnitPrice', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '合同金额', datafield: 'SumPayment', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '采购说明', datafield: 'PurchaseDescription', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款方式', datafield: 'PayType', width: 150, align: 'center', cellsAlign: 'center' },
{ text: '付款公司', datafield: 'PayCompany', width: 200, align: 'center', cellsAlign: 'center' },
{ text: '创建时间', datafield: 'CreateDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '创建人', datafield: 'CreateUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '修改时间', datafield: 'ChangeDate', width: 100, align: 'center', cellsAlign: 'center', datatype: 'date', cellsformat: "yyyy-MM-dd HH:mm:ss" },
{ text: '修改人', datafield: 'ChangeUser', width: 100, align: 'center', cellsAlign: 'center' },
{ text: '付款项目', datafield: 'PaymentVoucherVguid', width: 300, align: 'center', hidden: true, cellsAlign: 'center' },
{ text: 'VGUID', datafield: 'VGUID', hidden: true }
]
});
}
function cellsRendererFunc(row, column, value, rowData) {
return "<input class=\"jqx_datatable_checkbox\" index=\"" + row + "\" type=\"checkbox\" style=\"margin:auto;width: 17px;height: 17px;\" />";
}
function rendererFunc() {
var checkBox = "<div id='jqx_datatable_checkbox_all' class='jqx_datatable_checkbox_all' style='z-index: 999; margin-left:7px ;margin-top: 7px;'>";
checkBox += "</div>";
return checkBox;
}
function renderedFunc(element) {
var grid = selector.$grid();
element.jqxCheckBox();
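// The "select all" header checkbox: checking it selects every row and ticks
// the per-row checkboxes; unchecking it clears both.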
element.on('change', function (event) {
var checked = element.jqxCheckBox('checked');
if (checked) {
var rows = grid.jqxDataTable('getRows');
for (var i = 0; i < rows.length; i++) {
grid.jqxDataTable('selectRow', i);
grid.find(".jqx_datatable_checkbox").attr("checked", "checked")
}
} else {
grid.jqxDataTable('clearSelection');
grid.find(".jqx_datatable_checkbox").removeAttr("checked", "checked")
}
});
return true;
}
};
$(function () {
var page = new $page();
page.init();
});
__main__.py
# Copyright 2014 devbliss GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pkg_resources
import subprocess
import os
import os.path
from docopt import docopt
import re
__version__ = pkg_resources.get_distribution("git_devbliss").version
github_devbliss = pkg_resources.load_entry_point(
"git_devbliss", "console_scripts", "github-devbliss")
def main():
'''
Usage:
git-devbliss ( feature | bug | refactor | research ) DESCRIPTION
git-devbliss hotfix VERSION DESCRIPTION
git-devbliss finish [BASE_BRANCH]
git-devbliss release VERSION
git-devbliss status
git-devbliss delete [-f]
git-devbliss issue [TITLE]
git-devbliss review PULL_REQUEST_ID
git-devbliss merge-button PULL_REQUEST_ID
git-devbliss close-button PULL_REQUEST_ID
git-devbliss cleanup
Options:
feature, bug, refactor, research
Branch from master (normal branches)
hotfix Branch from a tag (fix a bug in an already released version)
finish Open a pull request for the current branch
release Create a new tag, commit and push
status List branches, pull requests, and issues
issue Quickly post an issue to GitHub
delete Delete the current branch on github.com
review Review a pull request with the given id
merge-button Merge a pull request with the given id
close-button Close a pull request with the given id without merging
cleanup Cleans up the repository
-v --version Print version number of git-devbliss
'''
try:
# check whether the pwd is a git repository
git('rev-parse --abbrev-ref HEAD', pipe=True)
# check whether origin points to github.com
git('remote -v | grep "^origin.*github.*:.*(fetch)$"', pipe=True)
except subprocess.CalledProcessError:
print('Fatal: origin does not point to a github.com repository',
file=sys.stderr)
sys.exit(1)
args = docopt(main.__doc__, version=__version__)
if(args['feature']):
branch('feature', args['DESCRIPTION'])
elif(args['bug']):
branch('bug', args['DESCRIPTION'])
elif(args['refactor']):
branch('refactor', args['DESCRIPTION'])
elif(args['research']):
branch('research', args['DESCRIPTION'])
elif(args['hotfix']):
hotfix(args['VERSION'], args['DESCRIPTION'])
elif(args['finish']):
finish(args['BASE_BRANCH'])
elif(args['release']):
release(args['VERSION'])
elif(args['status']):
github_devbliss(['status'])
elif(args['delete']):
delete(args['-f'])
elif(args['issue']):
github_devbliss(['issue', args['TITLE']])
elif(args['review']):
github_devbliss(['review', args['PULL_REQUEST_ID']])
elif(args['merge-button']):
github_devbliss(['merge-button', args['PULL_REQUEST_ID']])
elif(args['close-button']):
github_devbliss(['close-button', args['PULL_REQUEST_ID']])
elif(args['cleanup']):
cleanup()
def hotfix(tag, description):
if [_tag for _tag in git('tag', pipe=True).split('\n') if tag == _tag]:
git('fetch origin')
git('checkout --quiet {}'.format(tag))
git('checkout --quiet -b hotfix/{}'.format(description))
git('push --set-upstream origin hotfix/{}'.format(description))
else:
print('Tag not found: {}'.format(tag), file=sys.stderr)
print('Available tags:')
git('tag')
sys.exit(2)
def git(command, pipe=False):
if pipe:
return subprocess.check_output('git {}'.format(command),
shell=True).decode()
else:
return os.system('git {}'.format(command))
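# Example: git('rev-parse HEAD', pipe=True) returns the captured stdout as a
# str and raises subprocess.CalledProcessError on failure, while git('push')
# streams output to the terminal and returns the shell exit status.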
def is_repository_clean():
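# A zero line count from "git status --short" means no tracked files were
# modified, i.e. the working tree is clean.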
status = git('status --short --untracked-files=no | wc -l', pipe=True)
return status.strip() == "0"
def is_synced_origin(remote_branch):
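# The local branch counts as synced when local HEAD equals origin/<remote_branch>.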
return git('rev-parse HEAD', pipe=True) == git(
'rev-parse origin/{}'.format(remote_branch), pipe=True)
def check_repo_toplevel():
# check if pwd is repository root in order to run makefile hooks properly
rev_parse = git('rev-parse --show-toplevel', pipe=True).strip()
if os.path.abspath(rev_parse) != os.path.abspath(os.getcwd()):
print('You need to run this command from the toplevel'
' of the working tree.', file=sys.stderr)
sys.exit(2)
def call_hook(hook, env_vars=''):
check_repo_toplevel()
if os.path.isfile('Makefile'):
os.system(
'{env_vars} make {hook} || echo "Warning: Makefile has no target'
' named {hook}"'.format(**locals()))
if not is_repository_clean():
git('commit --quiet -am "Ran git devbliss {hook} hook"'.format(
**locals()))
else:
print('Warning: No Makefile found. All make hooks have been skipped.',
file=sys.stderr)
def branch(branch_type, branch_name):
if branch_name == 'finish':
print('You are creating a branch "{branch_type}/{branch_name}". '
'Did you mean to type "git devbliss finish"?'.format(**locals()))
print('You can delete this branch with "git devbliss delete'
' {branch_type}/{branch_name}"'.format(**locals()))
git('checkout --quiet master')
git('pull --quiet origin master')
try:
git('checkout --quiet -b {branch_type}/{branch_name}'.format(
**locals()))
except subprocess.CalledProcessError:
git('checkout --quiet {branch_type}/{branch_name}'.format(
**locals()))
git('push --set-upstream origin {branch_type}/{branch_name}'.format(
**locals()))
def release(version):
if not re.match(r'^\d+\.\d+\.\d+$', version):
print('Invalid version number', file=sys.stderr)
sys.exit(2)
git('fetch --quiet origin')
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print('Error: Repository is not clean. Aborting.', file=sys.stderr)
sys.exit(1)
if not is_synced_origin('master') and 'hotfix/' not in branch:
print('Error: Local branch is not in sync with origin. Aborting.',
file=sys.stderr)
print('Do "git pull && git push" and try agin.', file=sys.stderr)
sys.exit(1)
call_hook('release', 'DEVBLISS_VERSION="{}"'.format(version))
git('diff')
print("Have these changes been reviewed?")
print("[enter / ctrl+c to cancel]")
try:
input()
except KeyboardInterrupt:
sys.exit(2)
git('commit --quiet --allow-empty -m "Release: {version}"'.format(
**locals()))
git('push origin {branch}'.format(**locals()))
git('tag {version}'.format(**locals()))
git('push --tags origin')
if branch == 'master':
print()
github_devbliss(['pull-request'])
def delete(force=False):
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if branch == 'master':
print("Won't delete master branch. Aborting.", file=sys.stderr)
sys.exit(2)
if force or input(
'Really delete the remote branch? [y/N] ').capitalize() == 'Y':
git('push --delete origin {}'.format(branch))
print('To restore the remote branch, type')
print(' git push --set-upstream origin {}'.format(branch))
print('To delete your local branch, type')
print(' git checkout master && git branch -d {}'.format(branch))
def cleanup():
git('fetch')
print("Deleting remote tracking branches whose "
"tracked branches on server are gone...")
git('remote prune origin')
print("Searching all remote branches except release "
"that are already merged into master...")
get_remote_merged_branches = None
try:
get_remote_merged_branches = git('branch -r --merged origin/master'
' | grep -v master | grep -v release',
pipe=True)
except subprocess.CalledProcessError:
print('No remote merged branches found')
if get_remote_merged_branches:
print(get_remote_merged_branches)
if input("Do you want to delete those branches on the server? [y/N]"
).capitalize() == 'Y':
print("Deleting...")
os.system("echo '{}' | sed 's#origin/##' | xargs -I {{}}"
" git push origin :{{}}".format(
get_remote_merged_branches))
git('remote prune origin')
else:
print("ok, will not delete anything.")
print("Deleting all local branches (except current)"
" that are already merged into local master...")
git("branch --merged master | grep -v master "
"| grep -v '\*' | xargs git branch -d")
print("Checking for unmerged local branches...")
git('branch --no-merged master')
def finish(base_branch):
base_branch_used = bool(base_branch)
base_branch = base_branch or 'master'
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print("Error: Repository is not clean. Aborting.", file=sys.stderr)
sys.exit(1)
if branch not in git('branch --contains {}'.format(base_branch),
pipe=True):
if 'hotfix/' in branch and not base_branch_used:
print("Warning: Master is not merged into the current branch.")
else:
print("Error: Won't finish. {} is not merged into the"
" current branch.".format(base_branch), file=sys.stderr)
print("Please do 'git merge {}', make sure all conflicts"
" are merged and try again.".format(base_branch),
file=sys.stderr)
sys.exit(1)
env_vars = 'DEVBLISS_BRANCH_TYPE=' + branch.split('/')[0]
call_hook('finish', env_vars)
call_hook('changelog', env_vars)
call_hook('version', env_vars)
git('push origin {}'.format(branch))
print()
args = ['pull-request']
if base_branch:
args = args + [base_branch]
github_devbliss(args)
print()
github_devbliss(['open-pulls'])
if __name__ == '__main__':
sys.exit(main()) # pragma nocover
__main__.py
# Copyright 2014 devbliss GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pkg_resources
import subprocess
import os
import os.path
from docopt import docopt
import re
__version__ = pkg_resources.get_distribution("git_devbliss").version
github_devbliss = pkg_resources.load_entry_point(
"git_devbliss", "console_scripts", "github-devbliss")
def main():
'''
Usage:
git-devbliss ( feature | bug | refactor | research ) DESCRIPTION
git-devbliss hotfix VERSION DESCRIPTION
git-devbliss finish [BASE_BRANCH]
git-devbliss release VERSION
git-devbliss status
git-devbliss delete [-f]
git-devbliss issue [TITLE]
git-devbliss review PULL_REQUEST_ID
git-devbliss merge-button PULL_REQUEST_ID
git-devbliss close-button PULL_REQUEST_ID
git-devbliss cleanup
Options:
feature, bug, refactor, research
Branch from master (normal branches)
hotfix Branch from a tag (fix a bug in an already released version)
finish Open a pull request for the current branch
release Create a new tag, commit and push
status List branches, pull requests, and issues
issue Quickly post an issue to GitHub
delete Delete the current branch on github.com
review Review a pull request with the given id
merge-button Merge a pull request with the given id
close-button Close a pull request with the given id without merging
cleanup Cleans up the repository
-v --version Print version number of git-devbliss
'''
try:
# check whether the pwd is a git repository
git('rev-parse --abbrev-ref HEAD', pipe=True)
# check whether origin points to github.com
git('remote -v | grep "^origin.*github.*:.*(fetch)$"', pipe=True)
except subprocess.CalledProcessError:
print('Fatal: origin does not point to a github.com repository',
file=sys.stderr)
sys.exit(1)
args = docopt(main.__doc__, version=__version__)
if(args['feature']):
branch('feature', args['DESCRIPTION'])
elif(args['bug']):
branch('bug', args['DESCRIPTION'])
elif(args['refactor']):
branch('refactor', args['DESCRIPTION'])
elif(args['research']):
branch('research', args['DESCRIPTION'])
elif(args['hotfix']):
hotfix(args['VERSION'], args['DESCRIPTION'])
elif(args['finish']):
finish(args['BASE_BRANCH'])
elif(args['release']):
release(args['VERSION'])
elif(args['status']):
github_devbliss(['status'])
elif(args['delete']):
delete(args['-f'])
elif(args['issue']):
github_devbliss(['issue', args['TITLE']])
elif(args['review']):
github_devbliss(['review', args['PULL_REQUEST_ID']])
elif(args['merge-button']):
github_devbliss(['merge-button', args['PULL_REQUEST_ID']])
elif(args['close-button']):
github_devbliss(['close-button', args['PULL_REQUEST_ID']])
elif(args['cleanup']):
cleanup()
def hotfix(tag, description):
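# Hotfixes branch off an existing release tag rather than master; an unknown
# tag aborts and lists the available tags.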
if [_tag for _tag in git('tag', pipe=True).split('\n') if tag == _tag]:
git('fetch origin')
git('checkout --quiet {}'.format(tag))
git('checkout --quiet -b hotfix/{}'.format(description))
git('push --set-upstream origin hotfix/{}'.format(description))
else:
print('Tag not found: {}'.format(tag), file=sys.stderr)
print('Available tags:')
git('tag')
sys.exit(2)
def git(command, pipe=False):
if pipe:
return subprocess.check_output('git {}'.format(command),
shell=True).decode()
else:
return os.system('git {}'.format(command))
def is_repository_clean():
status = git('status --short --untracked-files=no | wc -l', pipe=True)
return status.strip() == "0"
def is_synced_origin(remote_branch):
return git('rev-parse HEAD', pipe=True) == git(
'rev-parse origin/{}'.format(remote_branch), pipe=True)
def check_repo_toplevel():
# check if pwd is repository root in order to run makefile hooks properly
rev_parse = git('rev-parse --show-toplevel', pipe=True).strip()
if os.path.abspath(rev_parse) != os.path.abspath(os.getcwd()):
print('You need to run this command from the toplevel'
' of the working tree.', file=sys.stderr)
sys.exit(2)
def call_hook(hook, env_vars=''):
check_repo_toplevel()
if os.path.isfile('Makefile'):
os.system(
'{env_vars} make {hook} || echo "Warning: Makefile has no target'
' named {hook}"'.format(**locals()))
if not is_repository_clean():
git('commit --quiet -am "Ran git devbliss {hook} hook"'.format(
**locals()))
else:
print('Warning: No Makefile found. All make hooks have been skipped.',
file=sys.stderr)
def branch(branch_type, branch_name):
if branch_name == 'finish':
print('You are creating a branch "{branch_type}/{branch_name}". '
'Did you mean to type "git devbliss finish"?'.format(**locals()))
print('You can delete this branch with "git devbliss delete'
' {branch_type}/{branch_name}"'.format(**locals()))
git('checkout --quiet master')
git('pull --quiet origin master')
try:
git('checkout --quiet -b {branch_type}/{branch_name}'.format(
**locals()))
except subprocess.CalledProcessError:
git('checkout --quiet {branch_type}/{branch_name}'.format(
**locals()))
git('push --set-upstream origin {branch_type}/{branch_name}'.format(
**locals()))
def release(version):
if not re.match(r'^\d+\.\d+\.\d+$', version):
print('Invalid version number', file=sys.stderr)
sys.exit(2)
git('fetch --quiet origin')
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print('Error: Repository is not clean. Aborting.', file=sys.stderr)
sys.exit(1)
if not is_synced_origin('master') and 'hotfix/' not in branch:
print('Error: Local branch is not in sync with origin. Aborting.',
file=sys.stderr)
print('Do "git pull && git push" and try agin.', file=sys.stderr)
sys.exit(1)
call_hook('release', 'DEVBLISS_VERSION="{}"'.format(version))
git('diff')
print("Have these changes been reviewed?")
print("[enter / ctrl+c to cancel]")
try:
input()
except KeyboardInterrupt:
sys.exit(2)
git('commit --quiet --allow-empty -m "Release: {version}"'.format(
**locals()))
git('push origin {branch}'.format(**locals()))
git('tag {version}'.format(**locals()))
git('push --tags origin')
if branch == 'master':
print()
github_devbliss(['pull-request'])
def delete(force=False):
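# Only the remote branch is deleted; hints for restoring it and for removing
# the local branch are printed afterwards.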
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if branch == 'master':
print("Won't delete master branch. Aborting.", file=sys.stderr)
sys.exit(2)
if force or input(
'Really delete the remote branch? [y/N] ').capitalize() == 'Y':
git('push --delete origin {}'.format(branch))
print('To restore the remote branch, type')
print(' git push --set-upstream origin {}'.format(branch))
print('To delete your local branch, type')
print(' git checkout master && git branch -d {}'.format(branch))
def cleanup():
git('fetch')
print("Deleting remote tracking branches whose "
"tracked branches on server are gone...")
git('remote prune origin')
print("Searching all remote branches except release "
"that are already merged into master...")
get_remote_merged_branches = None
try:
get_remote_merged_branches = git('branch -r --merged origin/master'
' | grep -v master | grep -v release',
pipe=True)
except subprocess.CalledProcessError:
print('No remote merged branches found')
if get_remote_merged_branches:
print(get_remote_merged_branches)
if input("Do you want to delete those branches on the server? [y/N]"
).capitalize() == 'Y':
print("Deleting...")
os.system("echo '{}' | sed 's#origin/##' | xargs -I {{}}"
" git push origin :{{}}".format(
get_remote_merged_branches))
git('remote prune origin')
else:
print("ok, will not delete anything.")
print("Deleting all local branches (except current)"
" that are already merged into local master...")
git("branch --merged master | grep -v master "
"| grep -v '\*' | xargs git branch -d")
print("Checking for unmerged local branches...")
git('branch --no-merged master')
def finish(base_branch):
base_branch_used = bool(base_branch)
base_branch = base_branch or 'master'
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print("Error: Repository is not clean. Aborting.", file=sys.stderr)
sys.exit(1)
if branch not in git('branch --contains {}'.format(base_branch),
pipe=True):
if 'hotfix/' in branch and not base_branch_used:
print("Warning: Master is not merged into the current branch.")
else:
print("Error: Won't finish. {} is not merged into the"
" current branch.".format(base_branch), file=sys.stderr)
print("Please do 'git merge {}', make sure all conflicts"
" are merged and try again.".format(base_branch),
file=sys.stderr)
sys.exit(1)
env_vars = 'DEVBLISS_BRANCH_TYPE=' + branch.split('/')[0]
call_hook('finish', env_vars)
call_hook('changelog', env_vars)
call_hook('version', env_vars)
git('push origin {}'.format(branch))
print()
args = ['pull-request']
if base_branch:
args = args + [base_branch]
github_devbliss(args)
print()
github_devbliss(['open-pulls'])
if __name__ == '__main__':
sys.exit(main()) # pragma nocover
__main__.py
# Copyright 2014 devbliss GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pkg_resources
import subprocess
import os
import os.path
from docopt import docopt
import re
__version__ = pkg_resources.get_distribution("git_devbliss").version
github_devbliss = pkg_resources.load_entry_point(
"git_devbliss", "console_scripts", "github-devbliss")
def main():
'''
Usage:
git-devbliss ( feature | bug | refactor | research ) DESCRIPTION
git-devbliss hotfix VERSION DESCRIPTION
git-devbliss finish [BASE_BRANCH]
git-devbliss release VERSION
git-devbliss status
git-devbliss delete [-f]
git-devbliss issue [TITLE]
git-devbliss review PULL_REQUEST_ID
git-devbliss merge-button PULL_REQUEST_ID
git-devbliss close-button PULL_REQUEST_ID
git-devbliss cleanup
Options:
feature, bug, refactor, research
Branch from master (normal branches)
hotfix Branch from a tag (fix a bug in an already released version)
finish Open a pull request for the current branch
release Create a new tag, commit and push
status List branches, pull requests, and issues
issue Quickly post an issue to GitHub
delete Delete the current branch on github.com
review Review a pull request with the given id
merge-button Merge a pull request with the given id
close-button Close a pull request with the given id without merging
cleanup Cleans up the repository
-v --version Print version number of git-devbliss
'''
try:
# check whether the pwd is a git repository
git('rev-parse --abbrev-ref HEAD', pipe=True)
# check whether origin points to github.com
git('remote -v | grep "^origin.*github.*:.*(fetch)$"', pipe=True)
except subprocess.CalledProcessError:
print('Fatal: origin does not point to a github.com repository',
file=sys.stderr)
sys.exit(1)
args = docopt(main.__doc__, version=__version__)
if(args['feature']):
branch('feature', args['DESCRIPTION'])
elif(args['bug']):
branch('bug', args['DESCRIPTION'])
elif(args['refactor']):
branch('refactor', args['DESCRIPTION'])
elif(args['research']):
branch('research', args['DESCRIPTION'])
elif(args['hotfix']):
hotfix(args['VERSION'], args['DESCRIPTION'])
elif(args['finish']):
finish(args['BASE_BRANCH'])
elif(args['release']):
release(args['VERSION'])
elif(args['status']):
github_devbliss(['status'])
elif(args['delete']):
delete(args['-f'])
elif(args['issue']):
github_devbliss(['issue', args['TITLE']])
elif(args['review']):
github_devbliss(['review', args['PULL_REQUEST_ID']])
elif(args['merge-button']):
github_devbliss(['merge-button', args['PULL_REQUEST_ID']])
elif(args['close-button']):
github_devbliss(['close-button', args['PULL_REQUEST_ID']])
elif(args['cleanup']):
cleanup()
def hotfix(tag, description):
if [_tag for _tag in git('tag', pipe=True).split('\n') if tag == _tag]:
git('fetch origin')
git('checkout --quiet {}'.format(tag))
git('checkout --quiet -b hotfix/{}'.format(description))
git('push --set-upstream origin hotfix/{}'.format(description))
else:
print('Tag not found: {}'.format(tag), file=sys.stderr)
print('Available tags:')
git('tag')
sys.exit(2)
def git(command, pipe=False):
if pipe:
return subprocess.check_output('git {}'.format(command),
shell=True).decode()
else:
return os.system('git {}'.format(command))
def is_repository_clean():
status = git('status --short --untracked-files=no | wc -l', pipe=True)
return status.strip() == "0"
def is_synced_origin(remote_branch):
return git('rev-parse HEAD', pipe=True) == git(
'rev-parse origin/{}'.format(remote_branch), pipe=True)
def check_repo_toplevel():
# check if pwd is repository root in order to run makefile hooks properly
rev_parse = git('rev-parse --show-toplevel', pipe=True).strip()
if os.path.abspath(rev_parse) != os.path.abspath(os.getcwd()):
print('You need to run this command from the toplevel'
' of the working tree.', file=sys.stderr)
sys.exit(2)
def call_hook(hook, env_vars=''):
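# Hooks are optional Makefile targets; env_vars (e.g. DEVBLISS_VERSION="1.2.3")
# is prepended to the make invocation so the target can read it.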
check_repo_toplevel()
if os.path.isfile('Makefile'):
os.system(
'{env_vars} make {hook} || echo "Warning: Makefile has no target'
' named {hook}"'.format(**locals()))
if not is_repository_clean():
git('commit --quiet -am "Ran git devbliss {hook} hook"'.format(
**locals()))
else:
print('Warning: No Makefile found. All make hooks have been skipped.',
file=sys.stderr)
def branch(branch_type, branch_name):
if branch_name == 'finish':
print('You are creating a branch "{branch_type}/{branch_name}". '
'Did you mean to type "git devbliss finish"?'.format(**locals()))
print('You can delete this branch with "git devbliss delete'
' {branch_type}/{branch_name}"'.format(**locals()))
git('checkout --quiet master')
git('pull --quiet origin master')
try:
git('checkout --quiet -b {branch_type}/{branch_name}'.format(
**locals()))
except subprocess.CalledProcessError:
git('checkout --quiet {branch_type}/{branch_name}'.format(
**locals()))
git('push --set-upstream origin {branch_type}/{branch_name}'.format(
**locals()))
def release(version):
if not re.match(r'^\d+\.\d+\.\d+$', version):
print('Invalid version number', file=sys.stderr)
sys.exit(2)
git('fetch --quiet origin')
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print('Error: Repository is not clean. Aborting.', file=sys.stderr)
sys.exit(1)
if not is_synced_origin('master') and 'hotfix/' not in branch:
print('Error: Local branch is not in sync with origin. Aborting.',
file=sys.stderr)
print('Do "git pull && git push" and try agin.', file=sys.stderr)
sys.exit(1)
call_hook('release', 'DEVBLISS_VERSION="{}"'.format(version))
git('diff')
print("Have these changes been reviewed?")
print("[enter / ctrl+c to cancel]")
try:
input()
except KeyboardInterrupt:
sys.exit(2)
git('commit --quiet --allow-empty -m "Release: {version}"'.format(
**locals()))
git('push origin {branch}'.format(**locals()))
git('tag {version}'.format(**locals()))
git('push --tags origin')
if branch == 'master':
print()
github_devbliss(['pull-request'])
def delete(force=False):
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if branch == 'master':
print("Won't delete master branch. Aborting.", file=sys.stderr)
sys.exit(2)
if force or input(
'Really delete the remote branch? [y/N] ').capitalize() == 'Y':
git('push --delete origin {}'.format(branch))
print('To restore the remote branch, type')
print(' git push --set-upstream origin {}'.format(branch))
print('To delete your local branch, type')
print(' git checkout master && git branch -d {}'.format(branch))
def cleanup():
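# Prunes stale remote-tracking refs, offers to delete remote branches already
# merged into master, then drops merged local branches.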
git('fetch')
print("Deleting remote tracking branches whose "
"tracked branches on server are gone...")
git('remote prune origin')
print("Searching all remote branches except release "
"that are already merged into master...")
get_remote_merged_branches = None
try:
get_remote_merged_branches = git('branch -r --merged origin/master'
' | grep -v master | grep -v release',
pipe=True)
except subprocess.CalledProcessError:
print('No remote merged branches found')
if get_remote_merged_branches:
print(get_remote_merged_branches)
if input("Do you want to delete those branches on the server? [y/N]"
).capitalize() == 'Y':
print("Deleting...")
os.system("echo '{}' | sed 's#origin/##' | xargs -I {{}}"
" git push origin :{{}}".format(
get_remote_merged_branches))
git('remote prune origin')
else:
print("ok, will not delete anything.")
print("Deleting all local branches (except current)"
" that are already merged into local master...")
git("branch --merged master | grep -v master "
"| grep -v '\*' | xargs git branch -d")
print("Checking for unmerged local branches...")
git('branch --no-merged master')
def finish(base_branch):
base_branch_used = bool(base_branch)
base_branch = base_branch or 'master'
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print("Error: Repository is not clean. Aborting.", file=sys.stderr)
sys.exit(1)
if branch not in git('branch --contains {}'.format(base_branch),
pipe=True):
if 'hotfix/' in branch and not base_branch_used:
print("Warning: Master is not merged into the current branch.")
else:
print("Error: Won't finish. {} is not merged into the"
" current branch.".format(base_branch), file=sys.stderr)
print("Please do 'git merge {}', make sure all conflicts"
" are merged and try again.".format(base_branch),
file=sys.stderr)
sys.exit(1)
env_vars = 'DEVBLISS_BRANCH_TYPE=' + branch.split('/')[0]
call_hook('finish', env_vars)
call_hook('changelog', env_vars)
call_hook('version', env_vars)
git('push origin {}'.format(branch))
print()
args = ['pull-request']
if base_branch:
args = args + [base_branch]
github_devbliss(args)
print()
github_devbliss(['open-pulls'])
if __name__ == '__main__':
sys.exit(main()) # pragma nocover
__main__.py
# Copyright 2014 devbliss GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pkg_resources
import subprocess
import os
import os.path
from docopt import docopt
import re
__version__ = pkg_resources.get_distribution("git_devbliss").version
github_devbliss = pkg_resources.load_entry_point(
"git_devbliss", "console_scripts", "github-devbliss")
def main():
'''
Usage:
git-devbliss ( feature | bug | refactor | research ) DESCRIPTION
git-devbliss hotfix VERSION DESCRIPTION
git-devbliss finish [BASE_BRANCH]
git-devbliss release VERSION
git-devbliss status
git-devbliss delete [-f]
git-devbliss issue [TITLE]
git-devbliss review PULL_REQUEST_ID
git-devbliss merge-button PULL_REQUEST_ID
git-devbliss close-button PULL_REQUEST_ID
git-devbliss cleanup
Options:
feature, bug, refactor, research
Branch from master (normal branches)
hotfix Branch from a tag (fix a bug in an already released version)
finish Open a pull request for the current branch
release Create a new tag, commit and push
status List branches, pull requests, and issues
issue Quickly post an issue to GitHub
delete Delete the current branch on github.com
review Review a pull request with the given id
merge-button Merge a pull request with the given id
close-button Close a pull request with the given id without merging
cleanup Cleans up the repository
-v --version Print version number of git-devbliss
'''
try:
# check whether the pwd is a git repository
git('rev-parse --abbrev-ref HEAD', pipe=True)
# check whether origin points to github.com
git('remote -v | grep "^origin.*github.*:.*(fetch)$"', pipe=True)
except subprocess.CalledProcessError:
print('Fatal: origin does not point to a github.com repository',
file=sys.stderr)
sys.exit(1)
args = docopt(main.__doc__, version=__version__)
if(args['feature']):
branch('feature', args['DESCRIPTION'])
elif(args['bug']):
branch('bug', args['DESCRIPTION'])
elif(args['refactor']):
branch('refactor', args['DESCRIPTION'])
elif(args['research']):
branch('research', args['DESCRIPTION'])
elif(args['hotfix']):
hotfix(args['VERSION'], args['DESCRIPTION'])
elif(args['finish']):
finish(args['BASE_BRANCH'])
elif(args['release']):
release(args['VERSION'])
elif(args['status']):
github_devbliss(['status'])
elif(args['delete']):
delete(args['-f'])
elif(args['issue']):
github_devbliss(['issue', args['TITLE']])
elif(args['review']):
github_devbliss(['review', args['PULL_REQUEST_ID']])
elif(args['merge-button']):
github_devbliss(['merge-button', args['PULL_REQUEST_ID']])
elif(args['close-button']):
github_devbliss(['close-button', args['PULL_REQUEST_ID']])
elif(args['cleanup']):
cleanup()
def hotfix(tag, description):
if [_tag for _tag in git('tag', pipe=True).split('\n') if tag == _tag]:
git('fetch origin')
git('checkout --quiet {}'.format(tag))
git('checkout --quiet -b hotfix/{}'.format(description))
git('push --set-upstream origin hotfix/{}'.format(description))
else:
print('Tag not found: {}'.format(tag), file=sys.stderr)
print('Available tags:')
git('tag')
sys.exit(2)
def git(command, pipe=False):
if pipe:
return subprocess.check_output('git {}'.format(command),
shell=True).decode()
else:
return os.system('git {}'.format(command))
def is_repository_clean():
status = git('status --short --untracked-files=no | wc -l', pipe=True)
return status.strip() == "0"
def is_synced_origin(remote_branch):
return git('rev-parse HEAD', pipe=True) == git(
'rev-parse origin/{}'.format(remote_branch), pipe=True)
def check_repo_toplevel():
# check if pwd is repository root in order to run makefile hooks properly
rev_parse = git('rev-parse --show-toplevel', pipe=True).strip()
if os.path.abspath(rev_parse) != os.path.abspath(os.getcwd()):
print('You need to run this command from the toplevel'
' of the working tree.', file=sys.stderr)
sys.exit(2)
def call_hook(hook, env_vars=''):
check_repo_toplevel()
if os.path.isfile('Makefile'):
os.system(
'{env_vars} make {hook} || echo "Warning: Makefile has no target'
' named {hook}"'.format(**locals()))
if not is_repository_clean():
git('commit --quiet -am "Ran git devbliss {hook} hook"'.format(
**locals()))
else:
print('Warning: No Makefile found. All make hooks have been skipped.',
file=sys.stderr)
def branch(branch_type, branch_name):
if branch_name == 'finish':
print('You are creating a branch "{branch_type}/{branch_name}". '
'Did you mean to type "git devbliss finish"?'.format(**locals()))
print('You can delete this branch with "git devbliss delete'
' {branch_type}/{branch_name}"'.format(**locals()))
git('checkout --quiet master')
git('pull --quiet origin master')
try:
git('checkout --quiet -b {branch_type}/{branch_name}'.format(
**locals()))
except subprocess.CalledProcessError:
git('checkout --quiet {branch_type}/{branch_name}'.format(
**locals()))
git('push --set-upstream origin {branch_type}/{branch_name}'.format(
**locals()))
def release(version):
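# Requires a clean working tree and, except on hotfix branches, a local HEAD
# matching origin/master; commits, tags VERSION and pushes both.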
if not re.match(r'^\d+\.\d+\.\d+$', version):
print('Invalid version number', file=sys.stderr)
sys.exit(2)
git('fetch --quiet origin')
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print('Error: Repository is not clean. Aborting.', file=sys.stderr)
sys.exit(1)
if not is_synced_origin('master') and 'hotfix/' not in branch:
print('Error: Local branch is not in sync with origin. Aborting.',
file=sys.stderr)
print('Do "git pull && git push" and try agin.', file=sys.stderr)
sys.exit(1)
call_hook('release', 'DEVBLISS_VERSION="{}"'.format(version))
git('diff')
print("Have these changes been reviewed?")
print("[enter / ctrl+c to cancel]")
try:
input()
except KeyboardInterrupt:
sys.exit(2)
git('commit --quiet --allow-empty -m "Release: {version}"'.format(
**locals()))
git('push origin {branch}'.format(**locals()))
git('tag {version}'.format(**locals()))
git('push --tags origin')
if branch == 'master':
print()
github_devbliss(['pull-request'])
def delete(force=False):
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if branch == 'master':
print("Won't delete master branch. Aborting.", file=sys.stderr)
sys.exit(2)
if force or input(
'Really delete the remote branch? [y/N] ').capitalize() == 'Y':
git('push --delete origin {}'.format(branch))
print('To restore the remote branch, type')
print(' git push --set-upstream origin {}'.format(branch))
print('To delete your local branch, type')
print(' git checkout master && git branch -d {}'.format(branch))
def cleanup():
git('fetch')
print("Deleting remote tracking branches whose "
"tracked branches on server are gone...")
git('remote prune origin')
print("Searching all remote branches except release "
"that are already merged into master...")
get_remote_merged_branches = None
try:
get_remote_merged_branches = git('branch -r --merged origin/master'
' | grep -v master | grep -v release',
pipe=True)
except subprocess.CalledProcessError:
print('No remote merged branches found')
if get_remote_merged_branches:
print(get_remote_merged_branches)
if input("Do you want to delete those branches on the server? [y/N]"
).capitalize() == 'Y':
print("Deleting...")
os.system("echo '{}' | sed 's#origin/##' | xargs -I {{}}"
" git push origin :{{}}".format(
get_remote_merged_branches))
git('remote prune origin')
else:
print("ok, will not delete anything.")
print("Deleting all local branches (except current)"
" that are already merged into local master...")
git("branch --merged master | grep -v master "
"| grep -v '\*' | xargs git branch -d")
print("Checking for unmerged local branches...")
git('branch --no-merged master')
def finish(base_branch):
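# Runs the finish, changelog and version hooks, pushes the branch, and opens
# a pull request against base_branch (master by default).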
base_branch_used = bool(base_branch)
base_branch = base_branch or 'master'
branch = git('rev-parse --abbrev-ref HEAD', pipe=True)
if not is_repository_clean():
print("Error: Repository is not clean. Aborting.", file=sys.stderr)
sys.exit(1)
if branch not in git('branch --contains {}'.format(base_branch),
pipe=True):
if 'hotfix/' in branch and not base_branch_used:
print("Warning: Master is not merged into the current branch.")
else:
print("Error: Won't finish. {} is not merged into the"
" current branch.".format(base_branch), file=sys.stderr)
print("Please do 'git merge {}', make sure all conflicts"
" are merged and try again.".format(base_branch),
file=sys.stderr)
sys.exit(1)
env_vars = 'DEVBLISS_BRANCH_TYPE=' + branch.split('/')[0]
call_hook('finish', env_vars)
call_hook('changelog', env_vars)
call_hook('version', env_vars)
git('push origin {}'.format(branch))
print()
args = ['pull-request']
if base_branch:
args = args + [base_branch]
github_devbliss(args)
print()
github_devbliss(['open-pulls'])
if __name__ == '__main__':
sys.exit(main()) # pragma nocover
server.rs
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Grin server implementation, glues the different parts of the system (mostly
//! the peer-to-peer server, the blockchain and the transaction pool) and acts
//! as a facade.
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::{mpsc, Arc};
use std::{convert::TryInto, fs};
use std::{
thread::{self, JoinHandle},
time::{self, Duration},
};
use fs2::FileExt;
use walkdir::WalkDir;
use crate::api;
use crate::api::TLSConfig;
use crate::chain::{self, SyncState, SyncStatus};
use crate::common::adapters::{
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
};
use crate::common::hooks::{init_chain_hooks, init_net_hooks};
use crate::common::stats::{
ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats,
};
use crate::common::types::{Error, ServerConfig, StratumServerConfig};
use crate::core::core::hash::{Hashed, ZERO_HASH};
use crate::core::ser::ProtocolVersion;
use crate::core::{consensus, genesis, global, pow};
use crate::grin::{dandelion_monitor, seed, sync};
use crate::mining::stratumserver;
use crate::mining::test_miner::Miner;
use crate::p2p;
use crate::p2p::types::{Capabilities, PeerAddr};
use crate::pool;
use crate::util::file::get_first_line;
use crate::util::{RwLock, StopState};
use futures::channel::oneshot;
use grin_util::logger::LogEntry;
/// Arcified thread-safe TransactionPool with type parameters used by server components
pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>;
/// Grin server holding internal structures.
pub struct Server {
/// server config
pub config: ServerConfig,
/// handle to our network server
pub p2p: Arc<p2p::Server>,
/// data store access
pub chain: Arc<chain::Chain>,
/// in-memory transaction pool
pub tx_pool: ServerTxPool,
/// Whether we're currently syncing
pub sync_state: Arc<SyncState>,
/// To be passed around to collect stats and info
state_info: ServerStateInfo,
/// Stop flag
pub stop_state: Arc<StopState>,
/// Maintain a lock_file so we do not run multiple Grin nodes from same dir.
lock_file: Arc<File>,
connect_thread: Option<JoinHandle<()>>,
sync_thread: JoinHandle<()>,
dandelion_thread: JoinHandle<()>,
}
impl Server {
/// Instantiates and starts a new server. Optionally takes a callback
/// for the server to send an ARC copy of itself, to allow another process
/// to poll info about the server status
pub fn start<F>(
config: ServerConfig,
logs_rx: Option<mpsc::Receiver<LogEntry>>,
mut info_callback: F,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<(), Error>
where
F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>),
{
let mining_config = config.stratum_mining_config.clone();
let enable_test_miner = config.run_test_miner;
let test_miner_wallet_url = config.test_miner_wallet_url.clone();
let serv = Server::new(config, stop_state, api_chan)?;
if let Some(c) = mining_config {
let enable_stratum_server = c.enable_stratum_server;
if let Some(s) = enable_stratum_server {
if s {
{
let mut stratum_stats = serv.state_info.stratum_stats.write();
stratum_stats.is_enabled = true;
}
serv.start_stratum_server(c);
}
}
}
if let Some(s) = enable_test_miner {
if s {
serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone());
}
}
info_callback(serv, logs_rx);
Ok(())
}
// Exclusive (advisory) lock_file to ensure we do not run multiple
// instance of grin server from the same dir.
// This uses fs2 and should be safe cross-platform unless somebody abuses the file itself.
fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> {
let path = Path::new(&config.db_root);
fs::create_dir_all(&path)?;
let path = path.join("grin.lock");
let lock_file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
lock_file.try_lock_exclusive().map_err(|e| {
let mut stderr = std::io::stderr();
writeln!(
&mut stderr,
"Failed to lock {:?} (grin server already running?)",
path
)
.expect("Could not write to stderr");
e
})?;
Ok(Arc::new(lock_file))
}
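// Hedged illustration, not part of the original server: the advisory lock
// above can be probed by taking a second exclusive lock on the same file.
// fs2's `try_lock_exclusive` is non-blocking and returns an Err while any
// other handle (including one in another process) still holds the lock.
// The name `lock_is_free` is an assumption made for this sketch.
#[allow(dead_code)]
fn lock_is_free(path: &Path) -> bool {
fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(path)
.and_then(|f| f.try_lock_exclusive())
.is_ok()
}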
/// Instantiates a new server from the provided configuration.
pub fn new(
config: ServerConfig,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<Server, Error> {
// Obtain our lock_file or fail immediately with an error.
let lock_file = Server::one_grin_at_a_time(&config)?;
// Defaults to None (optional) in config file.
// This translates to false here.
let archive_mode = match config.archive_mode {
None => false,
Some(b) => b,
};
let stop_state = stop_state.unwrap_or_else(|| Arc::new(StopState::new()));
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone()));
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
config.pool_config.clone(),
pool_adapter.clone(),
pool_net_adapter.clone(),
)));
let sync_state = Arc::new(SyncState::new());
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(
tx_pool.clone(),
init_chain_hooks(&config),
));
let genesis = match config.chain_type {
global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::Testnet => genesis::genesis_test(),
global::ChainTypes::Mainnet => genesis::genesis_main(),
};
info!("Starting server, genesis block: {}", genesis.hash());
let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(),
chain_adapter.clone(),
genesis.clone(),
pow::verify_size,
archive_mode,
)?);
pool_adapter.set_chain(shared_chain.clone());
let net_adapter = Arc::new(NetToChainAdapter::new(
sync_state.clone(),
shared_chain.clone(),
tx_pool.clone(),
config.clone(),
init_net_hooks(&config),
));
// Initialize our capabilities.
// Currently either "default" or with optional "archive_mode" (block history) support enabled.
let capabilities = if archive_mode {
Capabilities::default() | Capabilities::BLOCK_HIST
} else {
Capabilities::default()
};
debug!("Capabilities: {:?}", capabilities);
let p2p_server = Arc::new(p2p::Server::new(
&config.db_root,
capabilities,
config.p2p_config.clone(),
net_adapter.clone(),
genesis.hash(),
stop_state.clone(),
)?);
// Initialize various adapters with our dynamic set of connected peers.
chain_adapter.init(p2p_server.peers.clone());
pool_net_adapter.init(p2p_server.peers.clone());
net_adapter.init(p2p_server.peers.clone());
let mut connect_thread = None;
if config.p2p_config.seeding_type != p2p::Seeding::Programmatic {
let seed_list = match config.p2p_config.seeding_type {
p2p::Seeding::None => {
warn!("No seed configured, will stay solo until connected to");
seed::predefined_seeds(vec![])
}
p2p::Seeding::List => match &config.p2p_config.seeds {
Some(seeds) => seed::predefined_seeds(seeds.peers.clone()),
None => {
return Err(Error::Configuration(
"Seeds must be configured for seeding type List".to_owned(),
));
}
},
p2p::Seeding::DNSSeed => seed::default_dns_seeds(),
_ => unreachable!(),
};
connect_thread = Some(seed::connect_and_monitor(
p2p_server.clone(),
seed_list,
config.p2p_config.clone(),
stop_state.clone(),
)?);
}
// Defaults to None (optional) in config file.
// This translates to false here so we do not skip by default.
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false);
sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait));
let sync_thread = sync::run_sync(
sync_state.clone(),
p2p_server.peers.clone(),
shared_chain.clone(),
stop_state.clone(),
)?;
let p2p_inner = p2p_server.clone();
let _ = thread::Builder::new()
.name("p2p-server".to_string())
.spawn(move || {
if let Err(e) = p2p_inner.listen() {
error!("P2P server failed with erorr: {:?}", e);
}
})?;
info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());
let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone());
let tls_conf = match config.tls_certificate_file.clone() {
None => None,
Some(file) => {
let key = match config.tls_certificate_key.clone() {
Some(k) => k,
None => {
let msg = "Private key for certificate is not set".to_string();
return Err(Error::ArgumentError(msg));
}
};
Some(TLSConfig::new(file, key))
}
};
api::node_apis(
&config.api_http_addr,
shared_chain.clone(),
tx_pool.clone(),
p2p_server.peers.clone(),
sync_state.clone(),
api_secret,
foreign_api_secret,
tls_conf,
api_chan,
stop_state.clone(),
)?;
info!("Starting dandelion monitor: {}", &config.api_http_addr);
let dandelion_thread = dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(),
tx_pool.clone(),
pool_net_adapter,
stop_state.clone(),
)?;
warn!("Grin server started.");
Ok(Server {
config,
p2p: p2p_server,
chain: shared_chain,
tx_pool,
sync_state,
state_info: ServerStateInfo {
..Default::default()
},
stop_state,
lock_file,
connect_thread,
sync_thread,
dandelion_thread,
})
}
/// Asks the server to connect to a peer at the provided network address.
pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> {
self.p2p.connect(addr)?;
Ok(())
}
/// Ping all peers, mostly useful for tests to have connected peers share
/// their heights
pub fn | (&self) -> Result<(), Error> {
let head = self.chain.head()?;
self.p2p.peers.check_all(head.total_difficulty, head.height);
Ok(())
}
/// Number of peers
pub fn peer_count(&self) -> u32 {
self.p2p
.peers
.iter()
.connected()
.count()
.try_into()
.unwrap()
}
/// Start a minimal "stratum" mining service on a separate thread
pub fn start_stratum_server(&self, config: StratumServerConfig) {
let proof_size = global::proofsize();
let sync_state = self.sync_state.clone();
let mut stratum_server = stratumserver::StratumServer::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
self.state_info.stratum_stats.clone(),
);
let _ = thread::Builder::new()
.name("stratum_server".to_string())
.spawn(move || {
stratum_server.run_loop(proof_size, sync_state);
});
}
/// Start mining for blocks internally on a separate thread. Relies on
/// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None'
pub fn start_test_miner(
&self,
wallet_listener_url: Option<String>,
stop_state: Arc<StopState>,
) {
info!("start_test_miner - start",);
let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u,
None => String::from("http://127.0.0.1:13415"),
};
let config = StratumServerConfig {
attempt_time_per_block: 60,
burn_reward: false,
enable_stratum_server: None,
stratum_server_addr: None,
wallet_listener_url: config_wallet_url,
minimum_share_difficulty: 1,
};
let mut miner = Miner::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
stop_state,
sync_state,
);
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port));
let _ = thread::Builder::new()
.name("test_miner".to_string())
.spawn(move || miner.run_loop(wallet_listener_url));
}
/// The chain head
pub fn head(&self) -> Result<chain::Tip, Error> {
self.chain.head().map_err(|e| e.into())
}
/// The head of the block header chain
pub fn header_head(&self) -> Result<chain::Tip, Error> {
self.chain.header_head().map_err(|e| e.into())
}
/// The p2p layer protocol version for this node.
pub fn protocol_version() -> ProtocolVersion {
ProtocolVersion::local()
}
/// Returns a set of stats about this server. This and the ServerStats
/// structure
/// can be updated over time to include any information needed by tests or
/// other consumers
pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
let stratum_stats = self.state_info.stratum_stats.read().clone();
// Fill out stats on our current difficulty calculation
// TODO: check the overhead of calculating this again isn't too much
// could return it from next_difficulty, but would rather keep consensus
// code clean. This may be handy for testing but not really needed
// for release
let diff_stats = {
let last_blocks: Vec<consensus::HeaderDifficultyInfo> =
global::difficulty_data_to_vector(self.chain.difficulty_iter()?)
.into_iter()
.collect();
let tip_height = self.head()?.height as i64;
let mut height = tip_height - last_blocks.len() as i64 + 1;
let diff_entries: Vec<DiffBlock> = last_blocks
.windows(2)
.map(|pair| {
let prev = &pair[0];
let next = &pair[1];
height += 1;
let block_hash = next.hash.unwrap_or(ZERO_HASH);
DiffBlock {
block_height: height,
block_hash,
difficulty: next.difficulty.to_num(),
time: next.timestamp,
duration: next.timestamp - prev.timestamp,
secondary_scaling: next.secondary_scaling,
is_secondary: next.is_secondary,
}
})
.collect();
let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration);
let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty);
DiffStats {
height: height as u64,
last_blocks: diff_entries,
average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1),
average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1),
window_size: consensus::DMA_WINDOW,
}
};
let peer_stats = self
.p2p
.peers
.iter()
.connected()
.into_iter()
.map(|p| PeerStats::from_peer(&p))
.collect();
// Updating TUI stats should not block any other processing so only attempt to
// acquire various read locks with a timeout.
let read_timeout = Duration::from_millis(500);
let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats {
tx_pool_size: pool.txpool.size(),
tx_pool_kernels: pool.txpool.kernel_count(),
stem_pool_size: pool.stempool.size(),
stem_pool_kernels: pool.stempool.kernel_count(),
});
let head = self.chain.head_header()?;
let head_stats = ChainStats {
latest_timestamp: head.timestamp,
height: head.height,
last_block_h: head.hash(),
total_difficulty: head.total_difficulty(),
};
let header_head = self.chain.header_head()?;
let header = self.chain.get_block_header(&header_head.hash())?;
let header_stats = ChainStats {
latest_timestamp: header.timestamp,
height: header.height,
last_block_h: header.hash(),
total_difficulty: header.total_difficulty(),
};
let disk_usage_bytes = WalkDir::new(&self.config.db_root)
.min_depth(1)
.max_depth(3)
.into_iter()
.filter_map(|entry| entry.ok())
.filter_map(|entry| entry.metadata().ok())
.filter(|metadata| metadata.is_file())
.fold(0, |acc, m| acc + m.len());
let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64));
Ok(ServerStats {
peer_count: self.peer_count(),
chain_stats: head_stats,
header_stats,
sync_status: self.sync_state.status(),
disk_usage_gb,
stratum_stats,
peer_stats,
diff_stats,
tx_stats,
})
}
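// Illustrative sketch only: the `windows(2)` pass in `get_server_stats`
// pairs consecutive headers, so timestamps [t0, t1, t2] produce durations
// t1 - t0 and t2 - t1 (one entry fewer than the input). The standalone
// name `block_durations` is an assumption for this sketch.
#[allow(dead_code)]
fn block_durations(timestamps: &[u64]) -> Vec<u64> {
timestamps.windows(2).map(|pair| pair[1] - pair[0]).collect()
}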
/// Stop the server.
pub fn stop(self) {
{
self.sync_state.update(SyncStatus::Shutdown);
self.stop_state.stop();
if let Some(connect_thread) = self.connect_thread {
match connect_thread.join() {
Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e),
Ok(_) => info!("connect_and_monitor thread stopped"),
}
} else {
info!("No active connect_and_monitor thread")
}
match self.sync_thread.join() {
Err(e) => error!("failed to join to sync thread: {:?}", e),
Ok(_) => info!("sync thread stopped"),
}
match self.dandelion_thread.join() {
Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e),
Ok(_) => info!("dandelion_monitor thread stopped"),
}
}
// this call is blocking and makes sure all peers stop, however
// we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread
self.p2p.stop();
let _ = self.lock_file.unlock();
warn!("Shutdown complete");
}
/// Pause the p2p server.
pub fn pause(&self) {
self.stop_state.pause();
thread::sleep(time::Duration::from_secs(1));
self.p2p.pause();
}
/// Resume p2p server.
/// TODO - We appear not to resume the p2p server (peer connections) here?
pub fn resume(&self) {
self.stop_state.resume();
}
/// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<StopState>) {
stop.stop();
info!("stop_test_miner - stop",);
}
}
| ping_peers | identifier_name |
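For orientation: each row of this dump is a fill-in-the-middle sample. The bare `|` inside the code marks the span removed from the source, and the trailing columns carry the removed middle (here the identifier `ping_peers`) and its fim_type. A minimal sketch of reassembling a sample from its columns; the function name is illustrative, not part of any dataset tooling:

// Rebuild the original source of a FIM sample from its three columns.
fn reassemble_fim(prefix: &str, middle: &str, suffix: &str) -> String {
format!("{prefix}{middle}{suffix}")
}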
server.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Grin server implementation, glues the different parts of the system (mostly
//! the peer-to-peer server, the blockchain and the transaction pool) and acts
//! as a facade.
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::{mpsc, Arc};
use std::{convert::TryInto, fs};
use std::{
thread::{self, JoinHandle},
time::{self, Duration},
};
use fs2::FileExt;
use walkdir::WalkDir;
use crate::api;
use crate::api::TLSConfig;
use crate::chain::{self, SyncState, SyncStatus};
use crate::common::adapters::{
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
};
use crate::common::hooks::{init_chain_hooks, init_net_hooks};
use crate::common::stats::{
ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats,
};
use crate::common::types::{Error, ServerConfig, StratumServerConfig};
use crate::core::core::hash::{Hashed, ZERO_HASH};
use crate::core::ser::ProtocolVersion;
use crate::core::{consensus, genesis, global, pow};
use crate::grin::{dandelion_monitor, seed, sync};
use crate::mining::stratumserver;
use crate::mining::test_miner::Miner;
use crate::p2p;
use crate::p2p::types::{Capabilities, PeerAddr};
use crate::pool;
use crate::util::file::get_first_line;
use crate::util::{RwLock, StopState};
use futures::channel::oneshot;
use grin_util::logger::LogEntry;
/// Arcified thread-safe TransactionPool with type parameters used by server components
pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>;
/// Grin server holding internal structures.
pub struct Server {
/// server config
pub config: ServerConfig,
/// handle to our network server
pub p2p: Arc<p2p::Server>,
/// data store access
pub chain: Arc<chain::Chain>,
/// in-memory transaction pool
pub tx_pool: ServerTxPool,
/// Whether we're currently syncing
pub sync_state: Arc<SyncState>,
/// To be passed around to collect stats and info
state_info: ServerStateInfo,
/// Stop flag
pub stop_state: Arc<StopState>,
/// Maintain a lock_file so we do not run multiple Grin nodes from the same dir.
lock_file: Arc<File>,
connect_thread: Option<JoinHandle<()>>,
sync_thread: JoinHandle<()>,
dandelion_thread: JoinHandle<()>,
}
impl Server {
/// Instantiates and starts a new server. Optionally takes a callback
/// for the server to send an `Arc` copy of itself, to allow another process
/// to poll info about the server status
pub fn start<F>(
config: ServerConfig,
logs_rx: Option<mpsc::Receiver<LogEntry>>,
mut info_callback: F,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<(), Error>
where
F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>),
{
let mining_config = config.stratum_mining_config.clone();
let enable_test_miner = config.run_test_miner;
let test_miner_wallet_url = config.test_miner_wallet_url.clone();
let serv = Server::new(config, stop_state, api_chan)?;
if let Some(c) = mining_config {
let enable_stratum_server = c.enable_stratum_server;
if let Some(s) = enable_stratum_server {
if s {
{
let mut stratum_stats = serv.state_info.stratum_stats.write();
stratum_stats.is_enabled = true;
}
serv.start_stratum_server(c);
}
}
}
if let Some(s) = enable_test_miner {
if s {
serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone());
}
}
info_callback(serv, logs_rx);
Ok(())
}
// Exclusive (advisory) lock_file to ensure we do not run multiple
// instances of the grin server from the same dir.
// This uses fs2 and should be safe cross-platform unless somebody abuses the file itself.
fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> {
let path = Path::new(&config.db_root);
fs::create_dir_all(&path)?;
let path = path.join("grin.lock");
let lock_file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
lock_file.try_lock_exclusive().map_err(|e| {
let mut stderr = std::io::stderr();
writeln!(
&mut stderr,
"Failed to lock {:?} (grin server already running?)",
path
)
.expect("Could not write to stderr");
e
})?;
Ok(Arc::new(lock_file))
}
/// Instantiates a new server from the provided configuration.
pub fn new(
config: ServerConfig,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<Server, Error> {
// Obtain our lock_file or fail immediately with an error.
let lock_file = Server::one_grin_at_a_time(&config)?;
// Defaults to None (optional) in config file.
// This translates to false here.
let archive_mode = match config.archive_mode {
None => false,
Some(b) => b,
};
let stop_state = stop_state.unwrap_or_else(|| Arc::new(StopState::new()));
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone()));
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
config.pool_config.clone(),
pool_adapter.clone(),
pool_net_adapter.clone(),
)));
let sync_state = Arc::new(SyncState::new());
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(
tx_pool.clone(),
init_chain_hooks(&config),
));
let genesis = match config.chain_type {
global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::Testnet => genesis::genesis_test(),
global::ChainTypes::Mainnet => genesis::genesis_main(),
};
info!("Starting server, genesis block: {}", genesis.hash());
let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(),
chain_adapter.clone(),
genesis.clone(),
pow::verify_size,
archive_mode,
)?);
pool_adapter.set_chain(shared_chain.clone());
let net_adapter = Arc::new(NetToChainAdapter::new(
sync_state.clone(),
shared_chain.clone(),
tx_pool.clone(),
config.clone(),
init_net_hooks(&config),
));
// Initialize our capabilities.
// Currently either "default" or with optional "archive_mode" (block history) support enabled.
let capabilities = if archive_mode {
Capabilities::default() | Capabilities::BLOCK_HIST
} else {
Capabilities::default()
};
debug!("Capabilities: {:?}", capabilities);
let p2p_server = Arc::new(p2p::Server::new(
&config.db_root,
capabilities,
config.p2p_config.clone(),
net_adapter.clone(),
genesis.hash(),
stop_state.clone(),
)?);
// Initialize various adapters with our dynamic set of connected peers.
chain_adapter.init(p2p_server.peers.clone());
pool_net_adapter.init(p2p_server.peers.clone());
net_adapter.init(p2p_server.peers.clone());
let mut connect_thread = None;
if config.p2p_config.seeding_type != p2p::Seeding::Programmatic {
let seed_list = match config.p2p_config.seeding_type {
p2p::Seeding::None => {
warn!("No seed configured, will stay solo until connected to");
seed::predefined_seeds(vec![])
}
p2p::Seeding::List => match &config.p2p_config.seeds {
Some(seeds) => seed::predefined_seeds(seeds.peers.clone()),
None => {
return Err(Error::Configuration(
"Seeds must be configured for seeding type List".to_owned(),
));
}
},
p2p::Seeding::DNSSeed => seed::default_dns_seeds(),
_ => unreachable!(),
};
connect_thread = Some(seed::connect_and_monitor(
p2p_server.clone(),
seed_list,
config.p2p_config.clone(),
stop_state.clone(),
)?);
}
// Defaults to None (optional) in config file.
// This translates to false here so we do not skip by default.
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false);
sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait));
let sync_thread = sync::run_sync(
sync_state.clone(),
p2p_server.peers.clone(),
shared_chain.clone(),
stop_state.clone(),
)?;
let p2p_inner = p2p_server.clone();
let _ = thread::Builder::new()
.name("p2p-server".to_string())
.spawn(move || {
if let Err(e) = p2p_inner.listen() {
error!("P2P server failed with erorr: {:?}", e);
}
})?;
info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());
let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone());
let tls_conf = match config.tls_certificate_file.clone() {
None => None,
Some(file) => {
let key = match config.tls_certificate_key.clone() {
Some(k) => k,
None => {
let msg = "Private key for certificate is not set".to_string();
return Err(Error::ArgumentError(msg));
}
};
Some(TLSConfig::new(file, key))
}
};
api::node_apis(
&config.api_http_addr,
shared_chain.clone(),
tx_pool.clone(),
p2p_server.peers.clone(),
sync_state.clone(),
api_secret,
foreign_api_secret,
tls_conf,
api_chan,
stop_state.clone(),
)?;
info!("Starting dandelion monitor: {}", &config.api_http_addr);
let dandelion_thread = dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(),
tx_pool.clone(),
pool_net_adapter,
stop_state.clone(),
)?;
warn!("Grin server started.");
Ok(Server {
config,
p2p: p2p_server,
chain: shared_chain,
tx_pool,
sync_state,
state_info: ServerStateInfo {
..Default::default()
},
stop_state,
lock_file,
connect_thread,
sync_thread,
dandelion_thread,
})
}
/// Asks the server to connect to a peer at the provided network address.
pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> {
self.p2p.connect(addr)?;
Ok(())
}
/// Ping all peers, mostly useful for tests to have connected peers share
/// their heights
pub fn ping_peers(&self) -> Result<(), Error> {
let head = self.chain.head()?;
self.p2p.peers.check_all(head.total_difficulty, head.height);
Ok(())
}
/// Number of peers
pub fn peer_count(&self) -> u32 {
self.p2p
.peers
.iter()
.connected()
.count()
.try_into()
.unwrap()
}
/// Start a minimal "stratum" mining service on a separate thread
pub fn start_stratum_server(&self, config: StratumServerConfig) {
let proof_size = global::proofsize();
let sync_state = self.sync_state.clone();
let mut stratum_server = stratumserver::StratumServer::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
self.state_info.stratum_stats.clone(),
);
let _ = thread::Builder::new()
.name("stratum_server".to_string())
.spawn(move || {
stratum_server.run_loop(proof_size, sync_state);
});
}
/// Start mining for blocks internally on a separate thread. Relies on
/// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None'
pub fn start_test_miner(
&self,
wallet_listener_url: Option<String>,
stop_state: Arc<StopState>,
) {
info!("start_test_miner - start",);
let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u,
None => String::from("http://127.0.0.1:13415"),
};
let config = StratumServerConfig {
attempt_time_per_block: 60,
burn_reward: false,
enable_stratum_server: None,
stratum_server_addr: None,
wallet_listener_url: config_wallet_url,
minimum_share_difficulty: 1,
};
let mut miner = Miner::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
stop_state,
sync_state,
);
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port));
let _ = thread::Builder::new()
.name("test_miner".to_string())
.spawn(move || miner.run_loop(wallet_listener_url));
}
/// The chain head
pub fn head(&self) -> Result<chain::Tip, Error> |
/// The head of the block header chain
pub fn header_head(&self) -> Result<chain::Tip, Error> {
self.chain.header_head().map_err(|e| e.into())
}
/// The p2p layer protocol version for this node.
pub fn protocol_version() -> ProtocolVersion {
ProtocolVersion::local()
}
/// Returns a set of stats about this server. This and the ServerStats
/// structure
/// can be updated over time to include any information needed by tests or
/// other consumers
pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
let stratum_stats = self.state_info.stratum_stats.read().clone();
// Fill out stats on our current difficulty calculation
// TODO: check the overhead of calculating this again isn't too much
// could return it from next_difficulty, but would rather keep consensus
// code clean. This may be handy for testing but not really needed
// for release
let diff_stats = {
let last_blocks: Vec<consensus::HeaderDifficultyInfo> =
global::difficulty_data_to_vector(self.chain.difficulty_iter()?)
.into_iter()
.collect();
let tip_height = self.head()?.height as i64;
let mut height = tip_height - last_blocks.len() as i64 + 1;
let diff_entries: Vec<DiffBlock> = last_blocks
.windows(2)
.map(|pair| {
let prev = &pair[0];
let next = &pair[1];
height += 1;
let block_hash = next.hash.unwrap_or(ZERO_HASH);
DiffBlock {
block_height: height,
block_hash,
difficulty: next.difficulty.to_num(),
time: next.timestamp,
duration: next.timestamp - prev.timestamp,
secondary_scaling: next.secondary_scaling,
is_secondary: next.is_secondary,
}
})
.collect();
let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration);
let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty);
DiffStats {
height: height as u64,
last_blocks: diff_entries,
average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1),
average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1),
window_size: consensus::DMA_WINDOW,
}
};
let peer_stats = self
.p2p
.peers
.iter()
.connected()
.into_iter()
.map(|p| PeerStats::from_peer(&p))
.collect();
// Updating TUI stats should not block any other processing so only attempt to
// acquire various read locks with a timeout.
let read_timeout = Duration::from_millis(500);
let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats {
tx_pool_size: pool.txpool.size(),
tx_pool_kernels: pool.txpool.kernel_count(),
stem_pool_size: pool.stempool.size(),
stem_pool_kernels: pool.stempool.kernel_count(),
});
let head = self.chain.head_header()?;
let head_stats = ChainStats {
latest_timestamp: head.timestamp,
height: head.height,
last_block_h: head.hash(),
total_difficulty: head.total_difficulty(),
};
let header_head = self.chain.header_head()?;
let header = self.chain.get_block_header(&header_head.hash())?;
let header_stats = ChainStats {
latest_timestamp: header.timestamp,
height: header.height,
last_block_h: header.hash(),
total_difficulty: header.total_difficulty(),
};
let disk_usage_bytes = WalkDir::new(&self.config.db_root)
.min_depth(1)
.max_depth(3)
.into_iter()
.filter_map(|entry| entry.ok())
.filter_map(|entry| entry.metadata().ok())
.filter(|metadata| metadata.is_file())
.fold(0, |acc, m| acc + m.len());
let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64));
Ok(ServerStats {
peer_count: self.peer_count(),
chain_stats: head_stats,
header_stats,
sync_status: self.sync_state.status(),
disk_usage_gb,
stratum_stats,
peer_stats,
diff_stats,
tx_stats,
})
}
/// Stop the server.
pub fn stop(self) {
{
self.sync_state.update(SyncStatus::Shutdown);
self.stop_state.stop();
if let Some(connect_thread) = self.connect_thread {
match connect_thread.join() {
Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e),
Ok(_) => info!("connect_and_monitor thread stopped"),
}
} else {
info!("No active connect_and_monitor thread")
}
match self.sync_thread.join() {
Err(e) => error!("failed to join to sync thread: {:?}", e),
Ok(_) => info!("sync thread stopped"),
}
match self.dandelion_thread.join() {
Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e),
Ok(_) => info!("dandelion_monitor thread stopped"),
}
}
// this call is blocking and makes sure all peers stop, however
// we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread
self.p2p.stop();
let _ = self.lock_file.unlock();
warn!("Shutdown complete");
}
/// Pause the p2p server.
pub fn pause(&self) {
self.stop_state.pause();
thread::sleep(time::Duration::from_secs(1));
self.p2p.pause();
}
/// Resume p2p server.
/// TODO - We appear not to resume the p2p server (peer connections) here?
pub fn resume(&self) {
self.stop_state.resume();
}
/// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<StopState>) {
stop.stop();
info!("stop_test_miner - stop",);
}
}
| {
self.chain.head().map_err(|e| e.into())
} | identifier_body |
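The `identifier_body` sample above masks an entire function body, and the recovered middle uses the common `map_err(|e| e.into())` conversion, which relies on a `From` impl between the two error types. A self-contained sketch of that pattern, with hypothetical `ChainError`/`ServerError` types standing in for the real ones:

#[derive(Debug)]
struct ChainError;
#[derive(Debug)]
struct ServerError;
// `.into()` inside `map_err` resolves through this `From` impl.
impl From<ChainError> for ServerError {
fn from(_: ChainError) -> Self {
ServerError
}
}
fn chain_head() -> Result<u64, ChainError> {
Ok(42)
}
fn server_head() -> Result<u64, ServerError> {
chain_head().map_err(|e| e.into())
}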
server.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Grin server implementation, glues the different parts of the system (mostly
//! the peer-to-peer server, the blockchain and the transaction pool) and acts
//! as a facade.
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::{mpsc, Arc};
use std::{convert::TryInto, fs};
use std::{
thread::{self, JoinHandle},
time::{self, Duration},
};
use fs2::FileExt;
use walkdir::WalkDir;
use crate::api;
use crate::api::TLSConfig;
use crate::chain::{self, SyncState, SyncStatus};
use crate::common::adapters::{
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
};
use crate::common::hooks::{init_chain_hooks, init_net_hooks};
use crate::common::stats::{
ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats,
};
use crate::common::types::{Error, ServerConfig, StratumServerConfig};
use crate::core::core::hash::{Hashed, ZERO_HASH};
use crate::core::ser::ProtocolVersion;
use crate::core::{consensus, genesis, global, pow};
use crate::grin::{dandelion_monitor, seed, sync};
use crate::mining::stratumserver;
use crate::mining::test_miner::Miner;
use crate::p2p;
use crate::p2p::types::{Capabilities, PeerAddr};
use crate::pool;
use crate::util::file::get_first_line;
use crate::util::{RwLock, StopState};
use futures::channel::oneshot;
use grin_util::logger::LogEntry;
/// Arcified thread-safe TransactionPool with type parameters used by server components
pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>;
/// Grin server holding internal structures.
pub struct Server {
/// server config
pub config: ServerConfig,
/// handle to our network server
pub p2p: Arc<p2p::Server>,
/// data store access
pub chain: Arc<chain::Chain>,
/// in-memory transaction pool
pub tx_pool: ServerTxPool,
/// Whether we're currently syncing
pub sync_state: Arc<SyncState>,
/// To be passed around to collect stats and info
state_info: ServerStateInfo,
/// Stop flag
pub stop_state: Arc<StopState>,
/// Maintain a lock_file so we do not run multiple Grin nodes from the same dir.
lock_file: Arc<File>,
connect_thread: Option<JoinHandle<()>>,
sync_thread: JoinHandle<()>,
dandelion_thread: JoinHandle<()>,
}
impl Server {
/// Instantiates and starts a new server. Optionally takes a callback
/// for the server to send an `Arc` copy of itself, to allow another process
/// to poll info about the server status
pub fn start<F>(
config: ServerConfig,
logs_rx: Option<mpsc::Receiver<LogEntry>>,
mut info_callback: F,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<(), Error>
where
F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>),
{
let mining_config = config.stratum_mining_config.clone();
let enable_test_miner = config.run_test_miner;
let test_miner_wallet_url = config.test_miner_wallet_url.clone();
let serv = Server::new(config, stop_state, api_chan)?;
if let Some(c) = mining_config {
let enable_stratum_server = c.enable_stratum_server;
if let Some(s) = enable_stratum_server {
if s {
{
let mut stratum_stats = serv.state_info.stratum_stats.write();
stratum_stats.is_enabled = true;
}
serv.start_stratum_server(c);
}
}
}
if let Some(s) = enable_test_miner {
if s {
serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone());
}
}
info_callback(serv, logs_rx);
Ok(())
}
// Exclusive (advisory) lock_file to ensure we do not run multiple
// instances of the grin server from the same dir.
// This uses fs2 and should be safe cross-platform unless somebody abuses the file itself.
fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> {
let path = Path::new(&config.db_root);
fs::create_dir_all(&path)?;
let path = path.join("grin.lock");
let lock_file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
lock_file.try_lock_exclusive().map_err(|e| {
let mut stderr = std::io::stderr();
writeln!(
&mut stderr,
"Failed to lock {:?} (grin server already running?)",
path
)
.expect("Could not write to stderr");
e
})?;
Ok(Arc::new(lock_file))
}
/// Instantiates a new server from the provided configuration.
pub fn new(
config: ServerConfig,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<Server, Error> {
// Obtain our lock_file or fail immediately with an error.
let lock_file = Server::one_grin_at_a_time(&config)?;
// Defaults to None (optional) in config file.
// This translates to false here.
let archive_mode = match config.archive_mode {
None => false,
Some(b) => b,
};
let stop_state = stop_state.unwrap_or_else(|| Arc::new(StopState::new()));
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone()));
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
config.pool_config.clone(),
pool_adapter.clone(),
pool_net_adapter.clone(),
)));
let sync_state = Arc::new(SyncState::new());
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(
tx_pool.clone(),
init_chain_hooks(&config),
));
let genesis = match config.chain_type {
global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::Testnet => genesis::genesis_test(),
global::ChainTypes::Mainnet => genesis::genesis_main(),
};
info!("Starting server, genesis block: {}", genesis.hash());
let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(),
chain_adapter.clone(),
genesis.clone(),
pow::verify_size,
archive_mode,
)?);
pool_adapter.set_chain(shared_chain.clone());
let net_adapter = Arc::new(NetToChainAdapter::new(
sync_state.clone(),
shared_chain.clone(),
tx_pool.clone(),
config.clone(),
init_net_hooks(&config),
));
// Initialize our capabilities.
// Currently either "default" or with optional "archive_mode" (block history) support enabled.
let capabilities = if archive_mode {
Capabilities::default() | Capabilities::BLOCK_HIST
} else {
Capabilities::default()
};
debug!("Capabilities: {:?}", capabilities);
let p2p_server = Arc::new(p2p::Server::new(
&config.db_root,
capabilities,
config.p2p_config.clone(),
net_adapter.clone(),
genesis.hash(),
stop_state.clone(),
)?);
// Initialize various adapters with our dynamic set of connected peers.
chain_adapter.init(p2p_server.peers.clone());
pool_net_adapter.init(p2p_server.peers.clone());
net_adapter.init(p2p_server.peers.clone());
let mut connect_thread = None;
if config.p2p_config.seeding_type != p2p::Seeding::Programmatic {
let seed_list = match config.p2p_config.seeding_type {
p2p::Seeding::None => {
warn!("No seed configured, will stay solo until connected to");
seed::predefined_seeds(vec![])
}
p2p::Seeding::List => match &config.p2p_config.seeds {
Some(seeds) => seed::predefined_seeds(seeds.peers.clone()),
None => {
return Err(Error::Configuration(
"Seeds must be configured for seeding type List".to_owned(),
));
}
},
p2p::Seeding::DNSSeed => seed::default_dns_seeds(),
_ => unreachable!(),
};
connect_thread = Some(seed::connect_and_monitor(
p2p_server.clone(),
seed_list,
config.p2p_config.clone(),
stop_state.clone(),
)?);
}
// Defaults to None (optional) in config file.
// This translates to false here so we do not skip by default.
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false);
sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait));
let sync_thread = sync::run_sync(
sync_state.clone(),
p2p_server.peers.clone(),
shared_chain.clone(),
stop_state.clone(),
)?;
let p2p_inner = p2p_server.clone();
let _ = thread::Builder::new()
.name("p2p-server".to_string())
.spawn(move || {
if let Err(e) = p2p_inner.listen() {
error!("P2P server failed with erorr: {:?}", e);
}
})?;
info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());
let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone());
let tls_conf = match config.tls_certificate_file.clone() {
None => None,
Some(file) => {
let key = match config.tls_certificate_key.clone() {
Some(k) => k,
None => {
let msg = "Private key for certificate is not set".to_string();
return Err(Error::ArgumentError(msg));
}
};
Some(TLSConfig::new(file, key))
}
};
api::node_apis(
&config.api_http_addr,
shared_chain.clone(),
tx_pool.clone(),
p2p_server.peers.clone(),
sync_state.clone(),
api_secret,
foreign_api_secret,
tls_conf,
api_chan,
stop_state.clone(),
)?;
info!("Starting dandelion monitor: {}", &config.api_http_addr);
let dandelion_thread = dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(),
tx_pool.clone(),
pool_net_adapter,
stop_state.clone(),
)?;
warn!("Grin server started.");
Ok(Server {
config,
p2p: p2p_server,
chain: shared_chain,
tx_pool,
sync_state,
state_info: ServerStateInfo {
..Default::default()
},
stop_state,
lock_file,
connect_thread,
sync_thread,
dandelion_thread,
})
}
/// Asks the server to connect to a peer at the provided network address.
pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> {
self.p2p.connect(addr)?;
Ok(())
}
/// Ping all peers, mostly useful for tests to have connected peers share
/// their heights
pub fn ping_peers(&self) -> Result<(), Error> {
let head = self.chain.head()?;
self.p2p.peers.check_all(head.total_difficulty, head.height);
Ok(())
}
/// Number of peers
pub fn peer_count(&self) -> u32 {
self.p2p
.peers
.iter()
.connected()
.count()
.try_into()
.unwrap()
}
/// Start a minimal "stratum" mining service on a separate thread
pub fn start_stratum_server(&self, config: StratumServerConfig) {
let proof_size = global::proofsize();
let sync_state = self.sync_state.clone();
let mut stratum_server = stratumserver::StratumServer::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
self.state_info.stratum_stats.clone(),
);
let _ = thread::Builder::new()
.name("stratum_server".to_string())
.spawn(move || {
stratum_server.run_loop(proof_size, sync_state);
});
}
/// Start mining for blocks internally on a separate thread. Relies on
/// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None'
pub fn start_test_miner(
&self,
wallet_listener_url: Option<String>,
stop_state: Arc<StopState>,
) {
info!("start_test_miner - start",);
let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u,
None => String::from("http://127.0.0.1:13415"),
};
let config = StratumServerConfig {
attempt_time_per_block: 60,
burn_reward: false,
enable_stratum_server: None,
stratum_server_addr: None,
wallet_listener_url: config_wallet_url,
minimum_share_difficulty: 1,
};
let mut miner = Miner::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
stop_state,
sync_state,
);
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port));
let _ = thread::Builder::new()
.name("test_miner".to_string())
.spawn(move || miner.run_loop(wallet_listener_url));
}
/// The chain head
pub fn head(&self) -> Result<chain::Tip, Error> {
self.chain.head().map_err(|e| e.into())
}
/// The head of the block header chain
pub fn header_head(&self) -> Result<chain::Tip, Error> {
self.chain.header_head().map_err(|e| e.into())
}
/// The p2p layer protocol version for this node.
pub fn protocol_version() -> ProtocolVersion {
ProtocolVersion::local()
}
/// Returns a set of stats about this server. This and the ServerStats
/// structure
/// can be updated over time to include any information needed by tests or
/// other consumers
pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
let stratum_stats = self.state_info.stratum_stats.read().clone();
// Fill out stats on our current difficulty calculation
// TODO: check the overhead of calculating this again isn't too much
// could return it from next_difficulty, but would rather keep consensus
// code clean. This may be handy for testing but not really needed
// for release
let diff_stats = {
let last_blocks: Vec<consensus::HeaderDifficultyInfo> =
global::difficulty_data_to_vector(self.chain.difficulty_iter()?)
.into_iter()
.collect();
let tip_height = self.head()?.height as i64;
let mut height = tip_height - last_blocks.len() as i64 + 1;
let diff_entries: Vec<DiffBlock> = last_blocks
.windows(2)
.map(|pair| {
let prev = &pair[0];
let next = &pair[1];
height += 1;
let block_hash = next.hash.unwrap_or(ZERO_HASH);
DiffBlock {
block_height: height,
block_hash,
difficulty: next.difficulty.to_num(),
time: next.timestamp,
duration: next.timestamp - prev.timestamp,
secondary_scaling: next.secondary_scaling,
is_secondary: next.is_secondary,
}
})
.collect();
let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration);
let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty);
DiffStats {
height: height as u64,
last_blocks: diff_entries,
average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1),
average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1),
window_size: consensus::DMA_WINDOW,
}
};
let peer_stats = self
.p2p
.peers
.iter()
.connected()
.into_iter()
.map(|p| PeerStats::from_peer(&p))
.collect();
// Updating TUI stats should not block any other processing so only attempt to
// acquire various read locks with a timeout.
let read_timeout = Duration::from_millis(500);
let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats {
tx_pool_size: pool.txpool.size(),
tx_pool_kernels: pool.txpool.kernel_count(),
stem_pool_size: pool.stempool.size(),
stem_pool_kernels: pool.stempool.kernel_count(),
});
let head = self.chain.head_header()?;
let head_stats = ChainStats {
latest_timestamp: head.timestamp,
height: head.height,
last_block_h: head.hash(),
total_difficulty: head.total_difficulty(),
};
let header_head = self.chain.header_head()?;
let header = self.chain.get_block_header(&header_head.hash())?; | };
let disk_usage_bytes = WalkDir::new(&self.config.db_root)
.min_depth(1)
.max_depth(3)
.into_iter()
.filter_map(|entry| entry.ok())
.filter_map(|entry| entry.metadata().ok())
.filter(|metadata| metadata.is_file())
.fold(0, |acc, m| acc + m.len());
let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64));
Ok(ServerStats {
peer_count: self.peer_count(),
chain_stats: head_stats,
header_stats,
sync_status: self.sync_state.status(),
disk_usage_gb,
stratum_stats,
peer_stats,
diff_stats,
tx_stats,
})
}
/// Stop the server.
pub fn stop(self) {
{
self.sync_state.update(SyncStatus::Shutdown);
self.stop_state.stop();
if let Some(connect_thread) = self.connect_thread {
match connect_thread.join() {
Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e),
Ok(_) => info!("connect_and_monitor thread stopped"),
}
} else {
info!("No active connect_and_monitor thread")
}
match self.sync_thread.join() {
Err(e) => error!("failed to join to sync thread: {:?}", e),
Ok(_) => info!("sync thread stopped"),
}
match self.dandelion_thread.join() {
Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e),
Ok(_) => info!("dandelion_monitor thread stopped"),
}
}
// this call is blocking and makes sure all peers stop, however
// we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread
self.p2p.stop();
let _ = self.lock_file.unlock();
warn!("Shutdown complete");
}
/// Pause the p2p server.
pub fn pause(&self) {
self.stop_state.pause();
thread::sleep(time::Duration::from_secs(1));
self.p2p.pause();
}
/// Resume p2p server.
/// TODO - We appear not to resume the p2p server (peer connections) here?
pub fn resume(&self) {
self.stop_state.resume();
}
/// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<StopState>) {
stop.stop();
info!("stop_test_miner - stop",);
}
} | let header_stats = ChainStats {
latest_timestamp: header.timestamp,
height: header.height,
last_block_h: header.hash(),
total_difficulty: header.total_difficulty(), | random_line_split |
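A `random_line_split` sample cuts the source at line boundaries: everything between two randomly chosen cut points becomes the middle (here the `ChainStats` literal lines), leaving the suffix to begin with the dangling `};`. A hedged sketch of producing such a split, assuming the `rand` crate; the exact sampling used to build this dataset is not documented here:

use rand::Rng;

// Carve (prefix, middle, suffix) out of `src` at two random line boundaries.
fn random_line_split(src: &str) -> (String, String, String) {
let lines: Vec<&str> = src.lines().collect();
let mut rng = rand::thread_rng();
let a = rng.gen_range(0..=lines.len());
let b = rng.gen_range(a..=lines.len());
(
lines[..a].join("\n"),
lines[a..b].join("\n"),
lines[b..].join("\n"),
)
}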
server.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Grin server implementation, glues the different parts of the system (mostly
//! the peer-to-peer server, the blockchain and the transaction pool) and acts
//! as a facade.
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::{mpsc, Arc};
use std::{convert::TryInto, fs};
use std::{
thread::{self, JoinHandle},
time::{self, Duration},
};
use fs2::FileExt;
use walkdir::WalkDir;
use crate::api;
use crate::api::TLSConfig;
use crate::chain::{self, SyncState, SyncStatus};
use crate::common::adapters::{
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
};
use crate::common::hooks::{init_chain_hooks, init_net_hooks};
use crate::common::stats::{
ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats,
};
use crate::common::types::{Error, ServerConfig, StratumServerConfig};
use crate::core::core::hash::{Hashed, ZERO_HASH};
use crate::core::ser::ProtocolVersion;
use crate::core::{consensus, genesis, global, pow};
use crate::grin::{dandelion_monitor, seed, sync};
use crate::mining::stratumserver;
use crate::mining::test_miner::Miner;
use crate::p2p;
use crate::p2p::types::{Capabilities, PeerAddr};
use crate::pool;
use crate::util::file::get_first_line;
use crate::util::{RwLock, StopState};
use futures::channel::oneshot;
use grin_util::logger::LogEntry;
/// Arcified thread-safe TransactionPool with type parameters used by server components
pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>;
/// Grin server holding internal structures.
pub struct Server {
/// server config
pub config: ServerConfig,
/// handle to our network server
pub p2p: Arc<p2p::Server>,
/// data store access
pub chain: Arc<chain::Chain>,
/// in-memory transaction pool
pub tx_pool: ServerTxPool,
/// Whether we're currently syncing
pub sync_state: Arc<SyncState>,
/// To be passed around to collect stats and info
state_info: ServerStateInfo,
/// Stop flag
pub stop_state: Arc<StopState>,
/// Maintain a lock_file so we do not run multiple Grin nodes from the same dir.
lock_file: Arc<File>,
connect_thread: Option<JoinHandle<()>>,
sync_thread: JoinHandle<()>,
dandelion_thread: JoinHandle<()>,
}
impl Server {
/// Instantiates and starts a new server. Optionally takes a callback
/// for the server to send an `Arc` copy of itself, to allow another process
/// to poll info about the server status
pub fn start<F>(
config: ServerConfig,
logs_rx: Option<mpsc::Receiver<LogEntry>>,
mut info_callback: F,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<(), Error>
where
F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>),
{
let mining_config = config.stratum_mining_config.clone();
let enable_test_miner = config.run_test_miner;
let test_miner_wallet_url = config.test_miner_wallet_url.clone();
let serv = Server::new(config, stop_state, api_chan)?;
if let Some(c) = mining_config {
let enable_stratum_server = c.enable_stratum_server;
if let Some(s) = enable_stratum_server {
if s {
{
let mut stratum_stats = serv.state_info.stratum_stats.write();
stratum_stats.is_enabled = true;
}
serv.start_stratum_server(c);
}
}
}
if let Some(s) = enable_test_miner {
if s {
serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone());
}
}
info_callback(serv, logs_rx);
Ok(())
}
// Exclusive (advisory) lock_file to ensure we do not run multiple
// instances of the grin server from the same dir.
// This uses fs2 and should be safe cross-platform unless somebody abuses the file itself.
fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> {
let path = Path::new(&config.db_root);
fs::create_dir_all(&path)?;
let path = path.join("grin.lock");
let lock_file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
lock_file.try_lock_exclusive().map_err(|e| {
let mut stderr = std::io::stderr();
writeln!(
&mut stderr,
"Failed to lock {:?} (grin server already running?)",
path
)
.expect("Could not write to stderr");
e
})?;
Ok(Arc::new(lock_file))
}
/// Instantiates a new server from the provided configuration.
pub fn new(
config: ServerConfig,
stop_state: Option<Arc<StopState>>,
api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>),
) -> Result<Server, Error> {
// Obtain our lock_file or fail immediately with an error.
let lock_file = Server::one_grin_at_a_time(&config)?;
// Defaults to None (optional) in config file.
// This translates to false here.
let archive_mode = match config.archive_mode {
None => false,
Some(b) => b,
};
let stop_state = stop_state.unwrap_or_else(|| Arc::new(StopState::new()));
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone()));
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
config.pool_config.clone(),
pool_adapter.clone(),
pool_net_adapter.clone(),
)));
let sync_state = Arc::new(SyncState::new());
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(
tx_pool.clone(),
init_chain_hooks(&config),
));
let genesis = match config.chain_type {
global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(),
global::ChainTypes::Testnet => genesis::genesis_test(),
global::ChainTypes::Mainnet => genesis::genesis_main(),
};
info!("Starting server, genesis block: {}", genesis.hash());
let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(),
chain_adapter.clone(),
genesis.clone(),
pow::verify_size,
archive_mode,
)?);
pool_adapter.set_chain(shared_chain.clone());
let net_adapter = Arc::new(NetToChainAdapter::new(
sync_state.clone(),
shared_chain.clone(),
tx_pool.clone(),
config.clone(),
init_net_hooks(&config),
));
// Initialize our capabilities.
// Currently either "default" or with optional "archive_mode" (block history) support enabled.
let capabilities = if archive_mode {
Capabilities::default() | Capabilities::BLOCK_HIST
} else {
Capabilities::default()
};
debug!("Capabilities: {:?}", capabilities);
let p2p_server = Arc::new(p2p::Server::new(
&config.db_root,
capabilities,
config.p2p_config.clone(),
net_adapter.clone(),
genesis.hash(),
stop_state.clone(),
)?);
// Initialize various adapters with our dynamic set of connected peers.
chain_adapter.init(p2p_server.peers.clone());
pool_net_adapter.init(p2p_server.peers.clone());
net_adapter.init(p2p_server.peers.clone());
let mut connect_thread = None;
if config.p2p_config.seeding_type != p2p::Seeding::Programmatic {
let seed_list = match config.p2p_config.seeding_type {
p2p::Seeding::None => {
warn!("No seed configured, will stay solo until connected to");
seed::predefined_seeds(vec![])
}
p2p::Seeding::List => match &config.p2p_config.seeds {
Some(seeds) => seed::predefined_seeds(seeds.peers.clone()),
None => |
},
p2p::Seeding::DNSSeed => seed::default_dns_seeds(),
_ => unreachable!(),
};
connect_thread = Some(seed::connect_and_monitor(
p2p_server.clone(),
seed_list,
config.p2p_config.clone(),
stop_state.clone(),
)?);
}
// Defaults to None (optional) in config file.
// This translates to false here so we do not skip by default.
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false);
sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait));
let sync_thread = sync::run_sync(
sync_state.clone(),
p2p_server.peers.clone(),
shared_chain.clone(),
stop_state.clone(),
)?;
let p2p_inner = p2p_server.clone();
let _ = thread::Builder::new()
.name("p2p-server".to_string())
.spawn(move || {
if let Err(e) = p2p_inner.listen() {
error!("P2P server failed with erorr: {:?}", e);
}
})?;
info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());
let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone());
let tls_conf = match config.tls_certificate_file.clone() {
None => None,
Some(file) => {
let key = match config.tls_certificate_key.clone() {
Some(k) => k,
None => {
let msg = "Private key for certificate is not set".to_string();
return Err(Error::ArgumentError(msg));
}
};
Some(TLSConfig::new(file, key))
}
};
api::node_apis(
&config.api_http_addr,
shared_chain.clone(),
tx_pool.clone(),
p2p_server.peers.clone(),
sync_state.clone(),
api_secret,
foreign_api_secret,
tls_conf,
api_chan,
stop_state.clone(),
)?;
info!("Starting dandelion monitor: {}", &config.api_http_addr);
let dandelion_thread = dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(),
tx_pool.clone(),
pool_net_adapter,
stop_state.clone(),
)?;
warn!("Grin server started.");
Ok(Server {
config,
p2p: p2p_server,
chain: shared_chain,
tx_pool,
sync_state,
state_info: ServerStateInfo {
..Default::default()
},
stop_state,
lock_file,
connect_thread,
sync_thread,
dandelion_thread,
})
}
/// Asks the server to connect to a peer at the provided network address.
pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> {
self.p2p.connect(addr)?;
Ok(())
}
/// Ping all peers, mostly useful for tests to have connected peers share
/// their heights
pub fn ping_peers(&self) -> Result<(), Error> {
let head = self.chain.head()?;
self.p2p.peers.check_all(head.total_difficulty, head.height);
Ok(())
}
/// Number of peers
pub fn peer_count(&self) -> u32 {
self.p2p
.peers
.iter()
.connected()
.count()
.try_into()
.unwrap()
}
/// Start a minimal "stratum" mining service on a separate thread
pub fn start_stratum_server(&self, config: StratumServerConfig) {
let proof_size = global::proofsize();
let sync_state = self.sync_state.clone();
let mut stratum_server = stratumserver::StratumServer::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
self.state_info.stratum_stats.clone(),
);
let _ = thread::Builder::new()
.name("stratum_server".to_string())
.spawn(move || {
stratum_server.run_loop(proof_size, sync_state);
});
}
/// Start mining for blocks internally on a separate thread. Relies on
/// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None'
pub fn start_test_miner(
&self,
wallet_listener_url: Option<String>,
stop_state: Arc<StopState>,
) {
info!("start_test_miner - start",);
let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u,
None => String::from("http://127.0.0.1:13415"),
};
let config = StratumServerConfig {
attempt_time_per_block: 60,
burn_reward: false,
enable_stratum_server: None,
stratum_server_addr: None,
wallet_listener_url: config_wallet_url,
minimum_share_difficulty: 1,
};
let mut miner = Miner::new(
config,
self.chain.clone(),
self.tx_pool.clone(),
stop_state,
sync_state,
);
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port));
let _ = thread::Builder::new()
.name("test_miner".to_string())
.spawn(move || miner.run_loop(wallet_listener_url));
}
/// The chain head
pub fn head(&self) -> Result<chain::Tip, Error> {
self.chain.head().map_err(|e| e.into())
}
/// The head of the block header chain
pub fn header_head(&self) -> Result<chain::Tip, Error> {
self.chain.header_head().map_err(|e| e.into())
}
/// The p2p layer protocol version for this node.
pub fn protocol_version() -> ProtocolVersion {
ProtocolVersion::local()
}
/// Returns a set of stats about this server. This and the ServerStats
/// structure
/// can be updated over time to include any information needed by tests or
/// other consumers
pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
let stratum_stats = self.state_info.stratum_stats.read().clone();
// Fill out stats on our current difficulty calculation
// TODO: check the overhead of calculating this again isn't too much
// could return it from next_difficulty, but would rather keep consensus
// code clean. This may be handy for testing but not really needed
// for release
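// Note on the window arithmetic: `windows(2)` yields one entry per adjacent
// header pair (len - 1 entries for a full window), which is why both
// averages below divide by DMA_WINDOW - 1 rather than DMA_WINDOW.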
let diff_stats = {
let last_blocks: Vec<consensus::HeaderDifficultyInfo> =
global::difficulty_data_to_vector(self.chain.difficulty_iter()?)
.into_iter()
.collect();
let tip_height = self.head()?.height as i64;
let mut height = tip_height - last_blocks.len() as i64 + 1;
let diff_entries: Vec<DiffBlock> = last_blocks
.windows(2)
.map(|pair| {
let prev = &pair[0];
let next = &pair[1];
height += 1;
let block_hash = next.hash.unwrap_or(ZERO_HASH);
DiffBlock {
block_height: height,
block_hash,
difficulty: next.difficulty.to_num(),
time: next.timestamp,
duration: next.timestamp - prev.timestamp,
secondary_scaling: next.secondary_scaling,
is_secondary: next.is_secondary,
}
})
.collect();
let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration);
let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty);
DiffStats {
height: height as u64,
last_blocks: diff_entries,
average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1),
average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1),
window_size: consensus::DMA_WINDOW,
}
};
let peer_stats = self
.p2p
.peers
.iter()
.connected()
.into_iter()
.map(|p| PeerStats::from_peer(&p))
.collect();
// Updating TUI stats should not block any other processing so only attempt to
// acquire various read locks with a timeout.
let read_timeout = Duration::from_millis(500);
let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats {
tx_pool_size: pool.txpool.size(),
tx_pool_kernels: pool.txpool.kernel_count(),
stem_pool_size: pool.stempool.size(),
stem_pool_kernels: pool.stempool.kernel_count(),
});
let head = self.chain.head_header()?;
let head_stats = ChainStats {
latest_timestamp: head.timestamp,
height: head.height,
last_block_h: head.hash(),
total_difficulty: head.total_difficulty(),
};
let header_head = self.chain.header_head()?;
let header = self.chain.get_block_header(&header_head.hash())?;
let header_stats = ChainStats {
latest_timestamp: header.timestamp,
height: header.height,
last_block_h: header.hash(),
total_difficulty: header.total_difficulty(),
};
let disk_usage_bytes = WalkDir::new(&self.config.db_root)
.min_depth(1)
.max_depth(3)
.into_iter()
.filter_map(|entry| entry.ok())
.filter_map(|entry| entry.metadata().ok())
.filter(|metadata| metadata.is_file())
.fold(0, |acc, m| acc + m.len());
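// `{:.*}` takes its precision from the first argument, so this prints three
// decimal places of decimal gigabytes (not GiB); e.g. 1_234_567_890 bytes
// renders as "1.235".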
let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64));
Ok(ServerStats {
peer_count: self.peer_count(),
chain_stats: head_stats,
header_stats,
sync_status: self.sync_state.status(),
disk_usage_gb,
stratum_stats,
peer_stats,
diff_stats,
tx_stats,
})
}
/// Stop the server.
pub fn stop(self) {
{
self.sync_state.update(SyncStatus::Shutdown);
self.stop_state.stop();
if let Some(connect_thread) = self.connect_thread {
match connect_thread.join() {
Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e),
Ok(_) => info!("connect_and_monitor thread stopped"),
}
} else {
info!("No active connect_and_monitor thread")
}
match self.sync_thread.join() {
Err(e) => error!("failed to join to sync thread: {:?}", e),
Ok(_) => info!("sync thread stopped"),
}
match self.dandelion_thread.join() {
Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e),
Ok(_) => info!("dandelion_monitor thread stopped"),
}
}
// this call is blocking and makes sure all peers stop, however
// we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread
self.p2p.stop();
let _ = self.lock_file.unlock();
warn!("Shutdown complete");
}
/// Pause the p2p server.
pub fn pause(&self) {
self.stop_state.pause();
thread::sleep(time::Duration::from_secs(1));
self.p2p.pause();
}
/// Resume p2p server.
/// TODO - We appear not to resume the p2p server (peer connections) here?
pub fn resume(&self) {
self.stop_state.resume();
}
/// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<StopState>) {
stop.stop();
info!("stop_test_miner - stop",);
}
}
| {
return Err(Error::Configuration(
"Seeds must be configured for seeding type List".to_owned(),
));
} | conditional_block |
genesis.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: kira/tokens/genesis.proto
package types
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
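// GenesisState mirrors kira/tokens/genesis.proto; the struct tags carry the
// wire field numbers (1-3), with "rep" marking repeated fields and "opt" the
// optional message field.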
type GenesisState struct {
Aliases []*TokenAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"`
Rates []*TokenRate `protobuf:"bytes,2,rep,name=rates,proto3" json:"rates,omitempty"`
TokenBlackWhites *TokensWhiteBlack `protobuf:"bytes,3,opt,name=tokenBlackWhites,proto3" json:"tokenBlackWhites,omitempty"`
}
func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
return fileDescriptor_d3cbd9121e22d5d1, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GenesisState) XXX_Merge(src proto.Message) {
xxx_messageInfo_GenesisState.Merge(m, src)
}
func (m *GenesisState) XXX_Size() int {
return m.Size()
}
func (m *GenesisState) XXX_DiscardUnknown() {
xxx_messageInfo_GenesisState.DiscardUnknown(m)
}
var xxx_messageInfo_GenesisState proto.InternalMessageInfo
func (m *GenesisState) GetAliases() []*TokenAlias {
if m != nil {
return m.Aliases
}
return nil
}
func (m *GenesisState) GetRates() []*TokenRate {
if m != nil {
return m.Rates
}
return nil
}
func (m *GenesisState) GetTokenBlackWhites() *TokensWhiteBlack {
if m != nil {
return m.TokenBlackWhites
}
return nil
}
func init() {
proto.RegisterType((*GenesisState)(nil), "kira.tokens.GenesisState")
}
func init() { proto.RegisterFile("kira/tokens/genesis.proto", fileDescriptor_d3cbd9121e22d5d1) }
var fileDescriptor_d3cbd9121e22d5d1 = []byte{
// 254 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xce, 0x2c, 0x4a,
0xd4, 0x2f, 0xc9, 0xcf, 0x4e, 0xcd, 0x2b, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x06, 0x49, 0xe9, 0x41, 0xa4, 0xa4, 0xc4, 0x91, 0xd5,
0x25, 0xe6, 0x64, 0x26, 0x42, 0x55, 0x49, 0x89, 0x21, 0x4b, 0x14, 0x25, 0x96, 0xa4, 0x42, 0xc5,
0x25, 0x90, 0xc5, 0xd3, 0x8a, 0x52, 0x53, 0xab, 0xa0, 0x32, 0x4a, 0xbb, 0x18, 0xb9, 0x78, 0xdc,
0x21, 0x36, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x19, 0x72, 0xb1, 0x83, 0x4d, 0x4c, 0x2d, 0x96,
0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x12, 0xd7, 0x43, 0xb2, 0x5a, 0x2f, 0x04, 0x44, 0x39, 0x82,
0x14, 0x04, 0xc1, 0xd4, 0x09, 0xe9, 0x70, 0xb1, 0x82, 0xec, 0x2a, 0x96, 0x60, 0x02, 0x6b, 0x10,
0xc3, 0xd4, 0x10, 0x94, 0x58, 0x92, 0x1a, 0x04, 0x51, 0x24, 0xe4, 0xc9, 0x25, 0x00, 0x96, 0x72,
0xca, 0x49, 0x4c, 0xce, 0x0e, 0xcf, 0xc8, 0x04, 0x69, 0x64, 0x56, 0x60, 0xd4, 0xe0, 0x36, 0x92,
0xc5, 0xd4, 0x58, 0x0c, 0x56, 0x00, 0x56, 0x1a, 0x84, 0xa1, 0xcd, 0xc9, 0xe9, 0xc4, 0x23, 0x39,
0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63,
0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92,
0xf3, 0x73, 0xf5, 0xbd, 0x33, 0x8b, 0x12, 0x9d, 0xf3, 0x8b, 0x52, 0xf5, 0x8b, 0x53, 0xb3, 0x13,
0x33, 0xf5, 0x2b, 0x60, 0xe1, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x9c, 0xc4, 0x06, 0x0e, 0x07, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xea, 0x38, 0x58, 0x7c, 0x01, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
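// Fields are written back to front. Each key byte below encodes
// (field_number << 3) | wire_type: 0x1a, 0x12 and 0xa are fields 3, 2 and 1,
// all wire type 2 (length-delimited).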
if m.TokenBlackWhites != nil {
{
size, err := m.TokenBlackWhites.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if len(m.Rates) > 0 {
for iNdEx := len(m.Rates) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Rates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Aliases) > 0 {
for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Aliases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
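// Little-endian base-128 varint: 7 payload bits per byte, with the high bit
// set on every byte except the last. Example: 300 encodes as 0xAC 0x02.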
offset -= sovGenesis(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GenesisState) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Aliases) > 0 {
for _, e := range m.Aliases {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if len(m.Rates) > 0 {
for _, e := range m.Rates {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if m.TokenBlackWhites != nil {
l = m.TokenBlackWhites.Size()
n += 1 + l + sovGenesis(uint64(l))
}
return n
}
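// sovGenesis is the encoded varint size of x: one byte per started group of
// 7 bits, with x|1 making zero occupy one byte (e.g. sovGenesis(300) =
// (9+6)/7 = 2). sozGenesis sizes a zigzag-encoded signed value, which maps
// 0, -1, 1, -2 to 0, 1, 2, 3 so small magnitudes stay small in either sign.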
func sovGenesis(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenesis(x uint64) (n int) {
return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GenesisState) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
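// The key varint just read packs both parts: e.g. a key byte of 0x12
// splits into field number 2 (0x12 >> 3) and wire type 2 (0x12 & 0x7).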
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Aliases = append(m.Aliases, &TokenAlias{})
if err := m.Aliases[len(m.Aliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Rates", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Rates = append(m.Rates, &TokenRate{})
if err := m.Rates[len(m.Rates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TokenBlackWhites", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l |
if m.TokenBlackWhites == nil {
m.TokenBlackWhites = &TokensWhiteBlack{}
}
if err := m.TokenBlackWhites.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenesis(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenesis
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenesis(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
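// Wire types: 0 varint, 1 fixed64 (skip 8 bytes), 2 length-delimited,
// 3/4 group start/end (tracked via depth), 5 fixed32 (skip 4 bytes).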
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGenesis
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenesis
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenesis
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
)
| {
return io.ErrUnexpectedEOF
} | conditional_block |
genesis.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: kira/tokens/genesis.proto
package types
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package | Rates []*TokenRate `protobuf:"bytes,2,rep,name=rates,proto3" json:"rates,omitempty"`
TokenBlackWhites *TokensWhiteBlack `protobuf:"bytes,3,opt,name=tokenBlackWhites,proto3" json:"tokenBlackWhites,omitempty"`
}
func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
return fileDescriptor_d3cbd9121e22d5d1, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GenesisState) XXX_Merge(src proto.Message) {
xxx_messageInfo_GenesisState.Merge(m, src)
}
func (m *GenesisState) XXX_Size() int {
return m.Size()
}
func (m *GenesisState) XXX_DiscardUnknown() {
xxx_messageInfo_GenesisState.DiscardUnknown(m)
}
var xxx_messageInfo_GenesisState proto.InternalMessageInfo
func (m *GenesisState) GetAliases() []*TokenAlias {
if m != nil {
return m.Aliases
}
return nil
}
func (m *GenesisState) GetRates() []*TokenRate {
if m != nil {
return m.Rates
}
return nil
}
func (m *GenesisState) GetTokenBlackWhites() *TokensWhiteBlack {
if m != nil {
return m.TokenBlackWhites
}
return nil
}
func init() {
proto.RegisterType((*GenesisState)(nil), "kira.tokens.GenesisState")
}
func init() { proto.RegisterFile("kira/tokens/genesis.proto", fileDescriptor_d3cbd9121e22d5d1) }
var fileDescriptor_d3cbd9121e22d5d1 = []byte{
// 254 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xce, 0x2c, 0x4a,
0xd4, 0x2f, 0xc9, 0xcf, 0x4e, 0xcd, 0x2b, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x06, 0x49, 0xe9, 0x41, 0xa4, 0xa4, 0xc4, 0x91, 0xd5,
0x25, 0xe6, 0x64, 0x26, 0x42, 0x55, 0x49, 0x89, 0x21, 0x4b, 0x14, 0x25, 0x96, 0xa4, 0x42, 0xc5,
0x25, 0x90, 0xc5, 0xd3, 0x8a, 0x52, 0x53, 0xab, 0xa0, 0x32, 0x4a, 0xbb, 0x18, 0xb9, 0x78, 0xdc,
0x21, 0x36, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x19, 0x72, 0xb1, 0x83, 0x4d, 0x4c, 0x2d, 0x96,
0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x12, 0xd7, 0x43, 0xb2, 0x5a, 0x2f, 0x04, 0x44, 0x39, 0x82,
0x14, 0x04, 0xc1, 0xd4, 0x09, 0xe9, 0x70, 0xb1, 0x82, 0xec, 0x2a, 0x96, 0x60, 0x02, 0x6b, 0x10,
0xc3, 0xd4, 0x10, 0x94, 0x58, 0x92, 0x1a, 0x04, 0x51, 0x24, 0xe4, 0xc9, 0x25, 0x00, 0x96, 0x72,
0xca, 0x49, 0x4c, 0xce, 0x0e, 0xcf, 0xc8, 0x04, 0x69, 0x64, 0x56, 0x60, 0xd4, 0xe0, 0x36, 0x92,
0xc5, 0xd4, 0x58, 0x0c, 0x56, 0x00, 0x56, 0x1a, 0x84, 0xa1, 0xcd, 0xc9, 0xe9, 0xc4, 0x23, 0x39,
0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63,
0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92,
0xf3, 0x73, 0xf5, 0xbd, 0x33, 0x8b, 0x12, 0x9d, 0xf3, 0x8b, 0x52, 0xf5, 0x8b, 0x53, 0xb3, 0x13,
0x33, 0xf5, 0x2b, 0x60, 0xe1, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x9c, 0xc4, 0x06, 0x0e, 0x07, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xea, 0x38, 0x58, 0x7c, 0x01, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.TokenBlackWhites != nil {
{
size, err := m.TokenBlackWhites.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if len(m.Rates) > 0 {
for iNdEx := len(m.Rates) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Rates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Aliases) > 0 {
for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Aliases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
offset -= sovGenesis(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GenesisState) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Aliases) > 0 {
for _, e := range m.Aliases {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if len(m.Rates) > 0 {
for _, e := range m.Rates {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if m.TokenBlackWhites != nil {
l = m.TokenBlackWhites.Size()
n += 1 + l + sovGenesis(uint64(l))
}
return n
}
func sovGenesis(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenesis(x uint64) (n int) {
return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GenesisState) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Aliases = append(m.Aliases, &TokenAlias{})
if err := m.Aliases[len(m.Aliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Rates", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Rates = append(m.Rates, &TokenRate{})
if err := m.Rates[len(m.Rates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TokenBlackWhites", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.TokenBlackWhites == nil {
m.TokenBlackWhites = &TokensWhiteBlack{}
}
if err := m.TokenBlackWhites.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenesis(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenesis
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenesis(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGenesis
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenesis
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenesis
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
) |
type GenesisState struct {
Aliases []*TokenAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"` | random_line_split |
genesis.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: kira/tokens/genesis.proto
package types
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type GenesisState struct {
Aliases []*TokenAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"`
Rates []*TokenRate `protobuf:"bytes,2,rep,name=rates,proto3" json:"rates,omitempty"`
TokenBlackWhites *TokensWhiteBlack `protobuf:"bytes,3,opt,name=tokenBlackWhites,proto3" json:"tokenBlackWhites,omitempty"`
}
func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) | () {}
func (*GenesisState) Descriptor() ([]byte, []int) {
return fileDescriptor_d3cbd9121e22d5d1, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GenesisState) XXX_Merge(src proto.Message) {
xxx_messageInfo_GenesisState.Merge(m, src)
}
func (m *GenesisState) XXX_Size() int {
return m.Size()
}
func (m *GenesisState) XXX_DiscardUnknown() {
xxx_messageInfo_GenesisState.DiscardUnknown(m)
}
var xxx_messageInfo_GenesisState proto.InternalMessageInfo
func (m *GenesisState) GetAliases() []*TokenAlias {
if m != nil {
return m.Aliases
}
return nil
}
func (m *GenesisState) GetRates() []*TokenRate {
if m != nil {
return m.Rates
}
return nil
}
func (m *GenesisState) GetTokenBlackWhites() *TokensWhiteBlack {
if m != nil {
return m.TokenBlackWhites
}
return nil
}
func init() {
proto.RegisterType((*GenesisState)(nil), "kira.tokens.GenesisState")
}
func init() { proto.RegisterFile("kira/tokens/genesis.proto", fileDescriptor_d3cbd9121e22d5d1) }
var fileDescriptor_d3cbd9121e22d5d1 = []byte{
// 254 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xce, 0x2c, 0x4a,
0xd4, 0x2f, 0xc9, 0xcf, 0x4e, 0xcd, 0x2b, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x06, 0x49, 0xe9, 0x41, 0xa4, 0xa4, 0xc4, 0x91, 0xd5,
0x25, 0xe6, 0x64, 0x26, 0x42, 0x55, 0x49, 0x89, 0x21, 0x4b, 0x14, 0x25, 0x96, 0xa4, 0x42, 0xc5,
0x25, 0x90, 0xc5, 0xd3, 0x8a, 0x52, 0x53, 0xab, 0xa0, 0x32, 0x4a, 0xbb, 0x18, 0xb9, 0x78, 0xdc,
0x21, 0x36, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x19, 0x72, 0xb1, 0x83, 0x4d, 0x4c, 0x2d, 0x96,
0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x12, 0xd7, 0x43, 0xb2, 0x5a, 0x2f, 0x04, 0x44, 0x39, 0x82,
0x14, 0x04, 0xc1, 0xd4, 0x09, 0xe9, 0x70, 0xb1, 0x82, 0xec, 0x2a, 0x96, 0x60, 0x02, 0x6b, 0x10,
0xc3, 0xd4, 0x10, 0x94, 0x58, 0x92, 0x1a, 0x04, 0x51, 0x24, 0xe4, 0xc9, 0x25, 0x00, 0x96, 0x72,
0xca, 0x49, 0x4c, 0xce, 0x0e, 0xcf, 0xc8, 0x04, 0x69, 0x64, 0x56, 0x60, 0xd4, 0xe0, 0x36, 0x92,
0xc5, 0xd4, 0x58, 0x0c, 0x56, 0x00, 0x56, 0x1a, 0x84, 0xa1, 0xcd, 0xc9, 0xe9, 0xc4, 0x23, 0x39,
0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63,
0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92,
0xf3, 0x73, 0xf5, 0xbd, 0x33, 0x8b, 0x12, 0x9d, 0xf3, 0x8b, 0x52, 0xf5, 0x8b, 0x53, 0xb3, 0x13,
0x33, 0xf5, 0x2b, 0x60, 0xe1, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x9c, 0xc4, 0x06, 0x0e, 0x07, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xea, 0x38, 0x58, 0x7c, 0x01, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.TokenBlackWhites != nil {
{
size, err := m.TokenBlackWhites.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if len(m.Rates) > 0 {
for iNdEx := len(m.Rates) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Rates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Aliases) > 0 {
for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Aliases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
offset -= sovGenesis(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GenesisState) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Aliases) > 0 {
for _, e := range m.Aliases {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if len(m.Rates) > 0 {
for _, e := range m.Rates {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if m.TokenBlackWhites != nil {
l = m.TokenBlackWhites.Size()
n += 1 + l + sovGenesis(uint64(l))
}
return n
}
func sovGenesis(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenesis(x uint64) (n int) {
return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GenesisState) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Aliases = append(m.Aliases, &TokenAlias{})
if err := m.Aliases[len(m.Aliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Rates", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Rates = append(m.Rates, &TokenRate{})
if err := m.Rates[len(m.Rates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TokenBlackWhites", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.TokenBlackWhites == nil {
m.TokenBlackWhites = &TokensWhiteBlack{}
}
if err := m.TokenBlackWhites.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenesis(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenesis
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenesis(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGenesis
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenesis
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenesis
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
)
| ProtoMessage | identifier_name |
genesis.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: kira/tokens/genesis.proto
package types
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type GenesisState struct {
Aliases []*TokenAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"`
Rates []*TokenRate `protobuf:"bytes,2,rep,name=rates,proto3" json:"rates,omitempty"`
TokenBlackWhites *TokensWhiteBlack `protobuf:"bytes,3,opt,name=tokenBlackWhites,proto3" json:"tokenBlackWhites,omitempty"`
}
func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
return fileDescriptor_d3cbd9121e22d5d1, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GenesisState) XXX_Merge(src proto.Message) {
xxx_messageInfo_GenesisState.Merge(m, src)
}
func (m *GenesisState) XXX_Size() int {
return m.Size()
}
func (m *GenesisState) XXX_DiscardUnknown() |
var xxx_messageInfo_GenesisState proto.InternalMessageInfo
func (m *GenesisState) GetAliases() []*TokenAlias {
if m != nil {
return m.Aliases
}
return nil
}
func (m *GenesisState) GetRates() []*TokenRate {
if m != nil {
return m.Rates
}
return nil
}
func (m *GenesisState) GetTokenBlackWhites() *TokensWhiteBlack {
if m != nil {
return m.TokenBlackWhites
}
return nil
}
func init() {
proto.RegisterType((*GenesisState)(nil), "kira.tokens.GenesisState")
}
func init() { proto.RegisterFile("kira/tokens/genesis.proto", fileDescriptor_d3cbd9121e22d5d1) }
var fileDescriptor_d3cbd9121e22d5d1 = []byte{
// 254 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xce, 0x2c, 0x4a,
0xd4, 0x2f, 0xc9, 0xcf, 0x4e, 0xcd, 0x2b, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x06, 0x49, 0xe9, 0x41, 0xa4, 0xa4, 0xc4, 0x91, 0xd5,
0x25, 0xe6, 0x64, 0x26, 0x42, 0x55, 0x49, 0x89, 0x21, 0x4b, 0x14, 0x25, 0x96, 0xa4, 0x42, 0xc5,
0x25, 0x90, 0xc5, 0xd3, 0x8a, 0x52, 0x53, 0xab, 0xa0, 0x32, 0x4a, 0xbb, 0x18, 0xb9, 0x78, 0xdc,
0x21, 0x36, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x19, 0x72, 0xb1, 0x83, 0x4d, 0x4c, 0x2d, 0x96,
0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x12, 0xd7, 0x43, 0xb2, 0x5a, 0x2f, 0x04, 0x44, 0x39, 0x82,
0x14, 0x04, 0xc1, 0xd4, 0x09, 0xe9, 0x70, 0xb1, 0x82, 0xec, 0x2a, 0x96, 0x60, 0x02, 0x6b, 0x10,
0xc3, 0xd4, 0x10, 0x94, 0x58, 0x92, 0x1a, 0x04, 0x51, 0x24, 0xe4, 0xc9, 0x25, 0x00, 0x96, 0x72,
0xca, 0x49, 0x4c, 0xce, 0x0e, 0xcf, 0xc8, 0x04, 0x69, 0x64, 0x56, 0x60, 0xd4, 0xe0, 0x36, 0x92,
0xc5, 0xd4, 0x58, 0x0c, 0x56, 0x00, 0x56, 0x1a, 0x84, 0xa1, 0xcd, 0xc9, 0xe9, 0xc4, 0x23, 0x39,
0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63,
0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92,
0xf3, 0x73, 0xf5, 0xbd, 0x33, 0x8b, 0x12, 0x9d, 0xf3, 0x8b, 0x52, 0xf5, 0x8b, 0x53, 0xb3, 0x13,
0x33, 0xf5, 0x2b, 0x60, 0xe1, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x9c, 0xc4, 0x06, 0x0e, 0x07, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xea, 0x38, 0x58, 0x7c, 0x01, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.TokenBlackWhites != nil {
{
size, err := m.TokenBlackWhites.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if len(m.Rates) > 0 {
for iNdEx := len(m.Rates) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Rates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Aliases) > 0 {
for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Aliases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenesis(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
offset -= sovGenesis(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GenesisState) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Aliases) > 0 {
for _, e := range m.Aliases {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if len(m.Rates) > 0 {
for _, e := range m.Rates {
l = e.Size()
n += 1 + l + sovGenesis(uint64(l))
}
}
if m.TokenBlackWhites != nil {
l = m.TokenBlackWhites.Size()
n += 1 + l + sovGenesis(uint64(l))
}
return n
}
func sovGenesis(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenesis(x uint64) (n int) {
return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GenesisState) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Aliases = append(m.Aliases, &TokenAlias{})
if err := m.Aliases[len(m.Aliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Rates", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Rates = append(m.Rates, &TokenRate{})
if err := m.Rates[len(m.Rates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TokenBlackWhites", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenesis
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenesis
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenesis
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.TokenBlackWhites == nil {
m.TokenBlackWhites = &TokensWhiteBlack{}
}
if err := m.TokenBlackWhites.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenesis(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenesis
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenesis(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenesis
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGenesis
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenesis
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenesis
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
)
| {
xxx_messageInfo_GenesisState.DiscardUnknown(m)
} | identifier_body |
util.js | const dateFormat = require('dateformat');
const randomstring = require("randomstring");
const queryString = require('query-string');
const $ = require('jquery');
const md5 = require('md5');
const sprintf = require('sprintf-js').sprintf
export const UUID = {
// yyyymmdd-hhMM-ssL-xxxxxxxxxxxx (8-4-4-12; timestamp pieces plus 12 random hex chars)
get: function () {
var d = new Date()
var pcs = []
pcs.push(dateFormat(d, 'yyyymmdd'))
pcs.push(dateFormat(d, 'hhMM'))
pcs.push(dateFormat(d, 'ssL'))
pcs.push(randomstring.generate({
length: 12,
charset: 'hex'
}))
return pcs.join('-')
}
}
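// Example output (per the dateFormat masks above; the last group is 12
// random hex chars): "20240102-0930-4512-a1b2c3d4e5f6"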
export const DatetimeFormat = {
date: 'yyyy-mm-dd',
time: 'HH:MM:ss',
datetime: 'yyyy-mm-dd HH:MM:ss',
now: function () {
return Date()
},
format: function (date, format) {
return dateFormat(date, format)
}
}
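// Usage: DatetimeFormat.format(DatetimeFormat.now(), DatetimeFormat.datetime)
// -> e.g. "2024-01-02 09:30:45"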
export const HtmlUtil = {
specialchars: function (str) {
var s = [];
if (!str) {
return '';
}
for (var i = 0; i < str.length; i++) {
switch (str.substr(i, 1)) {
case "<":
s.push("<");
break;
case ">":
s.push(">");
break;
case "&":
s.push("&");
break;
case " ":
s.push(" ");
break;
case "\"":
s.push(""");
break;
default:
s.push(str.substr(i, 1));
break;
}
}
return s.join('');
}
}
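// Example: HtmlUtil.specialchars('<a href="x">')
// -> '&lt;a&nbsp;href=&quot;x&quot;&gt;'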
export const FormatUtil = {
telephone(number) {
if (!number) {
return null
}
// console.log('before',number);
[/\+86/g, /\+/g, / /g, /\(/g, /\)/g, /-/g, /(/g, /)/g, / /g, /"/g, /;/g, /\t/g].forEach(o => {
number = number.replace(o, '')
})
// console.log('after',number)
if (/^[0-9]{3,20}$/.test(number)) {
return number
}
return null
}
}
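// Example: FormatUtil.telephone('+86 (010) 1234-5678') -> '01012345678';
// input that does not normalize to 3-20 digits returns null.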
export const StrUtil = {
randomString(len) {
len = len || 32;
var $chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
var maxPos = $chars.length;
var pwd = '';
for (let i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos));
}
return pwd;
},
matchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
pattern = "^" + pattern + "$"
var regex = new RegExp(pattern)
return regex.test(text)
},
keywordsMatchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
var regex = new RegExp(pattern)
return regex.test(text)
},
sprintf() {
const args = Array.from(arguments)
return sprintf.call(null, ...args)
}
}
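// Example: StrUtil.matchWildcard('genesis.pb.go', '*.pb.go') -> true
// (anchored with ^...$), while keywordsMatchWildcard('a/b.pb.go', 'b*go')
// -> true because it only needs the pattern to occur somewhere in the text.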
function str2asc(strstr) {
return ("0" + strstr.charCodeAt(0).toString(16)).slice(-2);
}
function asc2str(ascasc) {
return String.fromCharCode(ascasc);
}
export const ArrayUtil = {
unique(arr) {
if (!arr || !arr.length) {
return []
}
let map = {}
arr.forEach(o => {
map[o] = true
})
return Object.keys(map)
}
}
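// Note: unique() collects values as object keys, so everything is coerced
// to a string: ArrayUtil.unique([1, '1', 2]) -> ['1', '2'].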
export const UrlUtil = {
domainUrl(url) {
url = url || ''
const base = window.location.protocol + '//' + window.location.host
if (url) {
return base + '/' + url
}
return base
},
urlencode(str) {
let ret = "";
const strSpecial = "!\"#$%&'()*+,/:;<=>?[]^`{|}~%";
let tt = "";
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
let c = str2asc(chr);
tt += chr + ":" + c + "n";
if (parseInt("0x" + c) > 0x7f) {
ret += "%" + c.slice(0, 2) + "%" + c.slice(-2);
} else {
if (chr === " ")
ret += "+";
else if (strSpecial.indexOf(chr) !== -1)
ret += "%" + c.toString(16);
else
ret += chr;
}
}
return ret;
},
urldecode(str) {
let ret = "";
str = str + ''
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
if (chr === "+") {
ret += " ";
} else if (chr === "%") {
let asc = str.substring(i + 1, i + 3);
if (parseInt("0x" + asc) > 0x7f) {
ret += asc2str(parseInt("0x" + asc + str.substring(i + 4, i + 6)));
i += 5;
} else {
| } else {
ret += chr;
}
}
return ret;
},
getQueries(query = undefined) {
return UrlUtil.parseQuery(query)
},
getQuery(key, defaultValue = null, query = undefined) {
const param = UrlUtil.parseQuery(query)
if (key in param) {
return param[key]
}
return defaultValue
},
parseQuery(str) {
str = str || window.location.search
return queryString.parse(str)
},
buildQuery(param) {
return queryString.stringify(param)
}
}
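// Example: with location.search === '?a=1&b=2', UrlUtil.getQuery('a') -> '1'
// and UrlUtil.buildQuery({a: 1, b: 2}) -> 'a=1&b=2'.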
export const JsonUtil = {
extend() {
return $.extend(...arguments)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !JsonUtil.equal(o1, o2)
},
clearObject(obj) {
let type
for (var i in obj) {
type = typeof obj[i]
switch (type) {
case 'string':
obj[i] = ''
break;
case 'number':
obj[i] = 0
break;
}
}
}
}
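// Note: clone()/equal() go through JSON, so functions, undefined values and
// prototype information are lost, and equal() is sensitive to key order.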
export const BeanUtil = {
/**
* Update bean from valuePool; valuePool must contain every field of bean.
*
* @param bean
* @param valuePool
* @deprecated use update() instead
*/
assign(bean, valuePool) {
if (!bean || !valuePool) {
return
}
Object.keys(bean).map(o => {
bean[o] = valuePool[o]
})
},
/**
* Update bean from beanNewValue; beanNewValue may contain only a subset of fields.
* @param bean
* @param beanNewValue
*/
update(bean, beanNewValue) {
if (!bean || !beanNewValue) {
return
}
Object.keys(beanNewValue).map(o => {
bean[o] = beanNewValue[o]
})
},
/**
* Check whether two beans are equal; note that key order must match as well.
* @param o1
* @param o2
* @returns {boolean}
*/
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !BeanUtil.equal(o1, o2)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
}
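// Example: const bean = {a: 1, b: 2}; BeanUtil.update(bean, {b: 3}) leaves
// bean as {a: 1, b: 3}; assign() would also overwrite a with undefined
// unless the value pool carries every key.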
export const UiUtil = {
treeToKeyBoolean(tree, values, valueKey, childrenKey) {
valueKey = valueKey || 'name'
childrenKey = childrenKey || 'children'
let list = []
const walk = (node) => {
node.map(o => {
list.push(o[valueKey])
if (o[childrenKey]) {
walk(o[childrenKey])
}
})
}
walk(tree)
return UiUtil.listToKeyBoolean(list, values)
},
listToKeyBoolean(list, values) {
let keyBooleanMap = {}
values = values || []
list.map(o => keyBooleanMap[o] = values.indexOf(o) >= 0)
return keyBooleanMap
},
// Collect the keys whose value is true in the map and return them as a list
keyBooleanToList(map) {
let list = []
Object.keys(map).map(k => {
if (map[k]) {
list.push(k)
}
})
return list
}
}
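// Example: UiUtil.keyBooleanToList({a: true, b: false}) -> ['a'];
// UiUtil.listToKeyBoolean(['a', 'b'], ['a']) -> {a: true, b: false}.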
export const DomUtil = {
// Dynamically set a style element's content
setStyleContent(id, css) {
let style = document.getElementById(id)
if (!style) {
style = document.createElement('style')
style.type = 'text/css'
style.id = id
document.getElementsByTagName('head')[0].appendChild(style)
style = document.getElementById(id)
}
style.innerHTML = css
},
// Dynamically load a JS file
loadScript(url, cb) {
let id = 's_' + md5(url)
let script = document.getElementById(id)
if (script) {
cb && cb({isNew: false})
return
}
script = document.createElement('script')
script.id = id
script.src = url
script.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(script)
},
loadScripts(urls, cb) {
let loads = {};
for (let url of urls) {
loads[url] = null
DomUtil.loadScript(url, data => {
loads[url] = data
})
}
let watch = () => {
for (let o in loads) {
if (!loads[o]) {
setTimeout(() => {
watch()
}, 100)
return
}
}
cb && cb()
}
setTimeout(() => {
watch()
}, 100)
},
// Dynamically load a CSS file
loadStylesheet(url, cb) {
let id = 's_' + md5(url)
let link = document.getElementById(id)
if (link) {
cb && cb({isNew: false})
return
}
link = document.createElement('link')
link.id = id
link.rel = 'stylesheet'
link.type = 'text/css'
link.href = url
link.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(link)
}
}
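// Example: DomUtil.loadScripts(['/a.js', '/b.js'], () => { /* both ready */ });
// element ids are md5-derived from the URL, so repeated loads are no-ops.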
const parser = require('ua-parser-js');
export const AgentUtil = {
isMobile(ua) {
ua = ua || window.navigator.userAgent
const device = parser.setUA(ua).getDevice()
return device.type === 'mobile'
},
isIOS() {
let u = navigator.userAgent;
return !!u.match(/\(i[^;]+;( U;)? CPU.+Mac OS X/)
},
isWX() {
let ua = window.navigator.userAgent.toLowerCase();
return ua.indexOf('micromessenger') !== -1
}
}
| ret += asc2str(parseInt("0x" + asc));
i += 2;
}
| conditional_block |
util.js | const dateFormat = require('dateformat');
const randomstring = require("randomstring");
const queryString = require('query-string');
const $ = require('jquery');
const md5 = require('md5');
const sprintf = require('sprintf-js').sprintf
export const UUID = {
// yyyymmdd-hhMM-ssL-xxxxxxxxxxxx (8-4-4-12; timestamp pieces plus 12 random hex chars)
get: function () {
var d = new Date()
var pcs = []
pcs.push(dateFormat(d, 'yyyymmdd'))
pcs.push(dateFormat(d, 'hhMM'))
pcs.push(dateFormat(d, 'ssL'))
pcs.push(randomstring.generate({
length: 12,
charset: 'hex'
}))
return pcs.join('-')
}
}
export const DatetimeFormat = {
date: 'yyyy-mm-dd',
time: 'HH:MM:ss',
datetime: 'yyyy-mm-dd HH:MM:ss',
now: function () {
return Date()
},
format: function (date, format) {
return dateFormat(date, format)
}
}
export const HtmlUtil = {
specialchars: function (str) {
var s = [];
if (!str) {
return '';
}
for (var i = 0; i < str.length; i++) {
switch (str.substr(i, 1)) {
case "<":
s.push("<");
break;
case ">":
s.push(">");
break;
case "&":
s.push("&");
break;
case " ":
s.push(" ");
break;
case "\"":
s.push(""");
break;
default:
s.push(str.substr(i, 1));
break;
}
}
return s.join('');
}
}
| export const FormatUtil = {
telephone(number) {
if (!number) {
return null
}
// console.log('before',number);
[/\+86/g, /\+/g, / /g, /\(/g, /\)/g, /-/g, /(/g, /)/g, / /g, /"/g, /;/g, /\t/g].forEach(o => {
number = number.replace(o, '')
})
// console.log('after',number)
if (/^[0-9]{3,20}$/.test(number)) {
return number
}
return null
}
}
export const StrUtil = {
randomString(len) {
len = len || 32;
var $chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
var maxPos = $chars.length;
var pwd = '';
for (let i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos));
}
return pwd;
},
matchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
pattern = "^" + pattern + "$"
var regex = new RegExp(pattern)
return regex.test(text)
},
keywordsMatchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
var regex = new RegExp(pattern)
return regex.test(text)
},
sprintf() {
const args = Array.from(arguments)
return sprintf.call(null, ...args)
}
}
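// Usage sketch (illustrative): '*' becomes '.*' and every other character is
// escaped literally; matchWildcard anchors the pattern at both ends, while
// keywordsMatchWildcard allows a match anywhere in the text.
// StrUtil.matchWildcard('util.spec.js', '*.spec.js') // => true
// StrUtil.matchWildcard('util.js', '*.spec.js') // => false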
function str2asc(strstr) {
return ("0" + strstr.charCodeAt(0).toString(16)).slice(-2);
}
function asc2str(ascasc) {
return String.fromCharCode(ascasc);
}
export const ArrayUtil = {
unique(arr) {
if (!arr || !arr.length) {
return []
}
let map = {}
arr.forEach(o => {
map[o] = true
})
return Object.keys(map)
}
}
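// Note (illustrative): deduplication goes through object keys, so every
// element is coerced to a string.
// ArrayUtil.unique([1, '1', 2]) // => ['1', '2']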
export const UrlUtil = {
domainUrl(url) {
url = url || ''
const base = window.location.protocol + '//' + window.location.host
if (url) {
return base + '/' + url
}
return base
},
urlencode(str) {
let ret = "";
const strSpecial = "!\"#$%&'()*+,/:;<=>?[]^`{|}~%";
let tt = "";
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
let c = str2asc(chr);
tt += chr + ":" + c + "n";
if (parseInt("0x" + c) > 0x7f) {
ret += "%" + c.slice(0, 2) + "%" + c.slice(-2);
} else {
if (chr === " ")
ret += "+";
else if (strSpecial.indexOf(chr) !== -1)
ret += "%" + c.toString(16);
else
ret += chr;
}
}
return ret;
},
urldecode(str) {
let ret = "";
str = str + ''
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
if (chr === "+") {
ret += " ";
} else if (chr === "%") {
let asc = str.substring(i + 1, i + 3);
if (parseInt("0x" + asc) > 0x7f) {
ret += asc2str(parseInt("0x" + asc + str.substring(i + 4, i + 6)));
i += 5;
} else {
ret += asc2str(parseInt("0x" + asc));
i += 2;
}
} else {
ret += chr;
}
}
return ret;
},
getQueries(query = undefined) {
return UrlUtil.parseQuery(query)
},
getQuery(key, defaultValue = null, query = undefined) {
const param = UrlUtil.parseQuery(query)
if (key in param) {
return param[key]
}
return defaultValue
},
parseQuery(str) {
str = str || window.location.search
return queryString.parse(str)
},
buildQuery(param) {
return queryString.stringify(param)
}
}
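// Round-trip sketch (illustrative): urlencode emits '+' for spaces, '%XX' for
// the listed special characters and a '%XX%XX' pair for characters above 0x7f
// (str2asc yields one byte per character, i.e. a GBK-style encoding rather
// than UTF-8); urldecode reverses the same scheme.
// UrlUtil.urldecode(UrlUtil.urlencode('a b&c')) // => 'a b&c'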
export const JsonUtil = {
extend() {
return $.extend(...arguments)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !JsonUtil.equal(o1, o2)
},
clearObject(obj) {
let type
for (var i in obj) {
type = typeof obj[i]
switch (type) {
case 'string':
obj[i] = ''
break;
case 'number':
obj[i] = 0
break;
}
}
}
}
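// Behavioral sketch (illustrative): clearObject resets string and number
// fields in place and leaves other value types untouched.
// let o = {a: 'x', b: 3, c: [1]}
// JsonUtil.clearObject(o) // o => {a: '', b: 0, c: [1]}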
export const BeanUtil = {
/**
* Update bean from valuePool; valuePool must contain every field of bean
*
* @param bean
* @param valuePool
* @deprecated use update instead
*/
assign(bean, valuePool) {
if (!bean || !valuePool) {
return
}
Object.keys(bean).map(o => {
bean[o] = valuePool[o]
})
},
/**
* Update bean from beanNewValue; beanNewValue may contain only some of the fields
* @param bean
* @param beanNewValue
*/
update(bean, beanNewValue) {
if (!bean || !beanNewValue) {
return
}
Object.keys(beanNewValue).map(o => {
bean[o] = beanNewValue[o]
})
},
/**
* Check whether two beans are equal; note that the key order must match as well
* @param o1
* @param o2
* @returns {boolean}
*/
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !BeanUtil.equal(o1, o2)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
}
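// Behavioral sketch (illustrative): update copies only the keys present in
// the patch, while the deprecated assign iterates the target's keys and so
// sets any field missing from valuePool to undefined.
// let bean = {id: 1, name: 'a'}
// BeanUtil.update(bean, {name: 'b'}) // bean => {id: 1, name: 'b'}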
export const UiUtil = {
treeToKeyBoolean(tree, values, valueKey, childrenKey) {
valueKey = valueKey || 'name'
childrenKey = childrenKey || 'children'
let list = []
const walk = (node) => {
node.map(o => {
list.push(o[valueKey])
if (o[childrenKey]) {
walk(o[childrenKey])
}
})
}
walk(tree)
return UiUtil.listToKeyBoolean(list, values)
},
listToKeyBoolean(list, values) {
let keyBooleanMap = {}
values = values || []
list.map(o => keyBooleanMap[o] = values.indexOf(o) >= 0)
return keyBooleanMap
},
// Collect the keys whose value is true in the map and return them as a list
keyBooleanToList(map) {
let list = []
Object.keys(map).map(k => {
if (map[k]) {
list.push(k)
}
})
return list
}
}
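// Usage sketch (illustrative): flatten a {name, children} tree and mark which
// names appear in the selected values.
// UiUtil.treeToKeyBoolean([{name: 'a', children: [{name: 'b'}]}], ['b'])
// => {a: false, b: true}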
export const DomUtil = {
// Dynamically set style content
setStyleContent(id, css) {
let style = document.getElementById(id)
if (!style) {
style = document.createElement('style')
style.type = 'text/css'
style.id = id
document.getElementsByTagName('head')[0].appendChild(style)
style = document.getElementById(id)
}
style.innerHTML = css
},
// Dynamically load JS
loadScript(url, cb) {
let id = 's_' + md5(url)
let script = document.getElementById(id)
if (script) {
cb && cb({isNew: false})
return
}
script = document.createElement('script')
script.id = id
script.src = url
script.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(script)
},
loadScripts(urls, cb) {
let loads = {};
for (let url of urls) {
loads[url] = null
DomUtil.loadScript(url, data => {
loads[url] = data
})
}
let watch = () => {
for (let o in loads) {
if (!loads[o]) {
setTimeout(() => {
watch()
}, 100)
return
}
}
cb && cb()
}
setTimeout(() => {
watch()
}, 100)
},
// Dynamically load CSS
loadStylesheet(url, cb) {
let id = 's_' + md5(url)
let link = document.getElementById(id)
if (link) {
cb && cb({isNew: false})
return
}
link = document.createElement('link')
link.id = id
link.rel = 'stylesheet'
link.type = 'text/css'
link.href = url
link.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(link)
}
}
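// Usage sketch (illustrative; the URLs are placeholders): loadScripts polls
// every 100 ms until each per-url callback has fired, so cb runs only after
// all scripts have loaded (or were already present).
// DomUtil.loadScripts(['https://example.com/a.js', 'https://example.com/b.js'], () => {
//   // both <script> tags are in <head> at this point
// })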
const parser = require('ua-parser-js');
export const AgentUtil = {
isMobile(ua) {
ua = ua || window.navigator.userAgent
const device = parser.setUA(ua).getDevice()
return device.type === 'mobile'
},
isIOS() {
let u = navigator.userAgent;
return !!u.match(/\(i[^;]+;( U;)? CPU.+Mac OS X/)
},
isWX() {
let ua = window.navigator.userAgent.toLowerCase();
return ua.match(/MicroMessenger/i) !== null
}
} | random_line_split | |
util.js | const dateFormat = require('dateformat');
const randomstring = require("randomstring");
const queryString = require('query-string');
const $ = require('jquery');
const md5 = require('md5');
const sprintf = require('sprintf-js').sprintf
export const UUID = {
// yyyymmdd-hhMM-ssL-xxxxxxxxxxxx (8-4-4-12)
get: function () {
var d = new Date()
var pcs = []
pcs.push(dateFormat(d, 'yyyymmdd'))
pcs.push(dateFormat(d, 'hhMM'))
pcs.push(dateFormat(d, 'ssL'))
pcs.push(randomstring.generate({
length: 12,
charset: 'hex'
}))
return pcs.join('-')
}
}
export const DatetimeFormat = {
date: 'yyyy-mm-dd',
time: 'HH:MM:ss',
datetime: 'yyyy-mm-dd HH:MM:ss',
now: function () {
return Date()
},
format: function (date, format) {
return dateFormat(date, format)
}
}
export const HtmlUtil = {
specialchars: function (str) {
var s = [];
if (!str) {
return '';
}
if (str.length == 0) {
return '';
}
for (var i = 0; i < str.length; i++) {
switch (str.substr(i, 1)) {
case "<":
s.push("<");
break;
case ">":
s.push(">");
break;
case "&":
s.push("&");
break;
case " ":
s.push(" ");
break;
case "\"":
s.push(""");
break;
default:
s.push(str.substr(i, 1));
break;
}
}
return s.join('');
}
}
export const FormatUtil = {
telephone(number) {
if (!number) {
return null
}
// console.log('before',number);
[/\+86/g, /\+/g, / /g, /\(/g, /\)/g, /-/g, /（/g, /）/g, / /g, /"/g, /;/g, /\t/g].forEach(o => {
number = number.replace(o, '')
})
// console.log('after',number)
if (/^[0-9]{3,20}$/.test(number)) {
return number
}
return null
}
}
export const StrUtil = {
randomString(len) {
len = len || 32;
var $chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
var maxPos = $chars.length;
var pwd = '';
for (let i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos));
}
return pwd;
},
matchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
pattern = "^" + pattern + "$"
var regex = new RegExp(pattern)
return regex.test(text)
},
keywordsMatchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
var regex = new RegExp(pattern)
return regex.test(text)
},
sprintf() {
const args = Array.from(arguments)
return sprintf.call(null, ...args)
}
}
function str2asc(strstr) {
return ("0" + strstr.charCodeAt(0).toString(16)).slice(-2);
}
function asc2str(ascasc) {
return String.fromCharCode(ascasc);
}
export const ArrayUtil = {
unique(arr) {
if (!arr || !arr.length) {
return []
}
let map = {}
arr.forEach(o => {
map[o] = true
})
return Object.keys(map)
}
}
export const UrlUtil = {
domainUrl(url) {
url = url || ''
const base = window.location.protocol + '//' + window.location.host
if (url) {
return base + '/' + url
}
return base
},
urlencode(str) {
let ret = "";
const strSpecial = "!\"#$%&'()*+,/:;<=>?[]^`{|}~%";
let tt = "";
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
let c = str2asc(chr);
tt += chr + ":" + c + "n";
if (parseInt("0x" + c) > 0x7f) {
ret += "%" + c.slice(0, 2) + "%" + c.slice(-2);
} else {
if (chr === " ")
ret += "+";
else if (strSpecial.indexOf(chr) !== -1)
ret += "%" + c.toString(16);
else
ret += chr;
}
}
return ret;
},
urldecode(str) {
let ret = "";
str = str + ''
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
if (chr === "+") {
ret += " ";
} else if (chr === "%") {
let asc = str.substring(i + 1, i + 3);
if (parseInt("0x" + asc) > 0x7f) {
ret += asc2str(parseInt("0x" + asc + str.substring(i + 4, i + 6)));
i += 5;
} else {
ret += asc2str(parseInt("0x" + asc));
i += 2;
}
} else {
ret += chr;
}
}
return ret;
},
getQueries(query = undefined) {
return UrlUtil.parseQuery(query)
},
getQuery(key, defaultValue = null, query = undefined) {
const param = UrlUtil.parseQuery(query)
if (key in param) {
return param[key]
}
return defaultValue
},
parseQuery(str) {
str = str || window.location.search
return queryString.parse(str)
},
buildQuery(param) {
return queryString.stringify(param)
}
}
export const JsonUtil = {
extend() {
return $.extend(...arguments)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !JsonUtil.equal(o1, o2)
},
clearObject(obj) {
let type
for (var i in obj) {
type = typeof obj[i]
switch (type) {
case 'string':
obj[i] = ''
break;
case 'number':
obj[i] = 0
break;
}
}
}
}
export const BeanUtil = {
/**
* Update bean from valuePool; valuePool must contain every field of bean
*
* @param bean
* @param valuePool
* @deprecated use update instead
*/
assign(bean, valuePool) {
if (!bean || !valuePool) { | beanNewValue; beanNewValue may contain only some of the fields
* @param bean
* @param beanNewValue
*/
update(bean, beanNewValue) {
if (!bean || !beanNewValue) {
return
}
Object.keys(beanNewValue).map(o => {
bean[o] = beanNewValue[o]
})
},
/**
* Check whether two beans are equal; note that the key order must match as well
* @param o1
* @param o2
* @returns {boolean}
*/
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !BeanUtil.equal(o1, o2)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
}
export const UiUtil = {
treeToKeyBoolean(tree, values, valueKey, childrenKey) {
valueKey = valueKey || 'name'
childrenKey = childrenKey || 'children'
let list = []
const walk = (node) => {
node.map(o => {
list.push(o[valueKey])
if (o[childrenKey]) {
walk(o[childrenKey])
}
})
}
walk(tree)
return UiUtil.listToKeyBoolean(list, values)
},
listToKeyBoolean(list, values) {
let keyBooleanMap = {}
values = values || []
list.map(o => keyBooleanMap[o] = values.indexOf(o) >= 0)
return keyBooleanMap
},
// Collect the keys whose value is true in the map and return them as a list
keyBooleanToList(map) {
let list = []
Object.keys(map).map(k => {
if (map[k]) {
list.push(k)
}
})
return list
}
}
export const DomUtil = {
// Dynamically set style content
setStyleContent(id, css) {
let style = document.getElementById(id)
if (!style) {
style = document.createElement('style')
style.type = 'text/css'
style.id = id
document.getElementsByTagName('head')[0].appendChild(style)
style = document.getElementById(id)
}
style.innerHTML = css
},
// Dynamically load JS
loadScript(url, cb) {
let id = 's_' + md5(url)
let script = document.getElementById(id)
if (script) {
cb && cb({isNew: false})
return
}
script = document.createElement('script')
script.id = id
script.src = url
script.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(script)
},
loadScripts(urls, cb) {
let loads = {};
for (let url of urls) {
loads[url] = null
DomUtil.loadScript(url, data => {
loads[url] = data
})
}
let watch = () => {
for (let o in loads) {
if (!loads[o]) {
setTimeout(() => {
watch()
}, 100)
return
}
}
cb && cb()
}
setTimeout(() => {
watch()
}, 100)
},
// Dynamically load CSS
loadStylesheet(url, cb) {
let id = 's_' + md5(url)
let link = document.getElementById(id)
if (link) {
cb && cb({isNew: false})
return
}
link = document.createElement('link')
link.id = id
link.rel = 'stylesheet'
link.type = 'text/css'
link.href = url
link.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(link)
}
}
const parser = require('ua-parser-js');
export const AgentUtil = {
isMobile(ua) {
ua = ua || window.navigator.userAgent
const device = parser.setUA(ua).getDevice()
return device.type === 'mobile'
},
isIOS() {
let u = navigator.userAgent;
return !!u.match(/\(i[^;]+;( U;)? CPU.+Mac OS X/)
},
isWX() {
let ua = window.navigator.userAgent.toLowerCase();
return ua.match(/MicroMessenger/i) !== null
}
}
|
return
}
Object.keys(bean).map(o => {
bean[o] = valuePool[o]
})
},
/**
* Update bean from | identifier_body
util.js | const dateFormat = require('dateformat');
const randomstring = require("randomstring");
const queryString = require('query-string');
const $ = require('jquery');
const md5 = require('md5');
const sprintf = require('sprintf-js').sprintf
export const UUID = {
// yyyymmdd-hhMM-ssL-xxxxxxxxxxxx (8-4-4-12)
get: function () {
var d = new Date()
var pcs = []
pcs.push(dateFormat(d, 'yyyymmdd'))
pcs.push(dateFormat(d, 'hhMM'))
pcs.push(dateFormat(d, 'ssL'))
pcs.push(randomstring.generate({
length: 12,
charset: 'hex'
}))
return pcs.join('-')
}
}
export const DatetimeFormat = {
date: 'yyyy-mm-dd',
time: 'HH:MM:ss',
datetime: 'yyyy-mm-dd HH:MM:ss',
now: function () {
return Date()
},
format: function (date, format) {
return dateFormat(date, format)
}
}
export const HtmlUtil = {
specialchars: function (str) {
var s = [];
if (!str) {
return '';
}
if (str.length == 0) {
return '';
}
for (var i = 0; i < str.length; i++) {
switch (str.substr(i, 1)) {
case "<":
s.push("<");
break;
case ">":
s.push(">");
break;
case "&":
s.push("&");
break;
case " ":
s.push(" ");
break;
case "\"":
s.push(""");
break;
default:
s.push(str.substr(i, 1));
break;
}
}
return s.join('');
}
}
export const FormatUtil = {
telephone(number) {
if (!number) {
return null
}
// console.log('before',number);
[/\+86/g, /\+/g, / /g, /\(/g, /\)/g, /-/g, /（/g, /）/g, / /g, /"/g, /;/g, /\t/g].forEach(o => {
number = number.replace(o, '')
})
// console.log('after',number)
if (/^[0-9]{3,20}$/.test(number)) {
return number
}
return null
}
}
export const StrUtil = {
randomString(len) {
len = len || 32;
var $chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
var maxPos = $chars.length;
var pwd = '';
for (let i = 0; i < len; i++) {
pwd += $chars.charAt(Math.floor(Math.random() * maxPos));
}
return pwd;
},
matchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
pattern = "^" + pattern + "$"
var regex = new RegExp(pattern)
return regex.test(text)
},
keywordsMatchWildcard(text, pattern) {
var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1")
pattern = pattern.split("*").map(escapeRegex).join(".*")
var regex = new RegExp(pattern)
return regex.test(text)
},
sprintf() {
const args = Array.from(arguments)
return sprintf.call(null, ...args)
}
}
function str2asc(strstr) {
return ("0" + strstr.charCodeAt(0).toString(16)).slice(-2);
}
function asc2str(ascasc) {
return String.fromCharCode(ascasc);
}
export const ArrayUtil = {
unique(arr) {
if (!arr || !arr.length) {
return []
}
let map = {}
arr.forEach(o => {
map[o] = true
})
return Object.keys(map)
}
}
export const UrlUtil = {
domainUrl(url) {
url = url || ''
const base = window.location.protocol + '//' + window.location.host
if (url) {
return base + '/' + url
}
return base
},
urlenc | {
let ret = "";
const strSpecial = "!\"#$%&'()*+,/:;<=>?[]^`{|}~%";
let tt = "";
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
let c = str2asc(chr);
tt += chr + ":" + c + "n";
if (parseInt("0x" + c) > 0x7f) {
ret += "%" + c.slice(0, 2) + "%" + c.slice(-2);
} else {
if (chr === " ")
ret += "+";
else if (strSpecial.indexOf(chr) !== -1)
ret += "%" + c.toString(16);
else
ret += chr;
}
}
return ret;
},
urldecode(str) {
let ret = "";
str = str + ''
for (let i = 0; i < str.length; i++) {
let chr = str.charAt(i);
if (chr === "+") {
ret += " ";
} else if (chr === "%") {
let asc = str.substring(i + 1, i + 3);
if (parseInt("0x" + asc) > 0x7f) {
ret += asc2str(parseInt("0x" + asc + str.substring(i + 4, i + 6)));
i += 5;
} else {
ret += asc2str(parseInt("0x" + asc));
i += 2;
}
} else {
ret += chr;
}
}
return ret;
},
getQueries(query = undefined) {
return UrlUtil.parseQuery(query)
},
getQuery(key, defaultValue = null, query = undefined) {
const param = UrlUtil.parseQuery(query)
if (key in param) {
return param[key]
}
return defaultValue
},
parseQuery(str) {
str = str || window.location.search
return queryString.parse(str)
},
buildQuery(param) {
return queryString.stringify(param)
}
}
export const JsonUtil = {
extend() {
return $.extend(...arguments)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !JsonUtil.equal(o1, o2)
},
clearObject(obj) {
let type
for (var i in obj) {
type = typeof obj[i]
switch (type) {
case 'string':
obj[i] = ''
break;
case 'number':
obj[i] = 0
break;
}
}
}
}
export const BeanUtil = {
/**
* Update bean from valuePool; valuePool must contain every field of bean
*
* @param bean
* @param valuePool
* @deprecated use update instead
*/
assign(bean, valuePool) {
if (!bean || !valuePool) {
return
}
Object.keys(bean).map(o => {
bean[o] = valuePool[o]
})
},
/**
* Update bean from beanNewValue; beanNewValue may contain only some of the fields
* @param bean
* @param beanNewValue
*/
update(bean, beanNewValue) {
if (!bean || !beanNewValue) {
return
}
Object.keys(beanNewValue).map(o => {
bean[o] = beanNewValue[o]
})
},
/**
* Check whether two beans are equal; note that the key order must match as well
* @param o1
* @param o2
* @returns {boolean}
*/
equal(o1, o2) {
return JSON.stringify(o1) === JSON.stringify(o2)
},
notEqual(o1, o2) {
return !BeanUtil.equal(o1, o2)
},
clone(obj) {
return JSON.parse(JSON.stringify(obj))
},
}
export const UiUtil = {
treeToKeyBoolean(tree, values, valueKey, childrenKey) {
valueKey = valueKey || 'name'
childrenKey = childrenKey || 'children'
let list = []
const walk = (node) => {
node.map(o => {
list.push(o[valueKey])
if (o[childrenKey]) {
walk(o[childrenKey])
}
})
}
walk(tree)
return UiUtil.listToKeyBoolean(list, values)
},
listToKeyBoolean(list, values) {
let keyBooleanMap = {}
values = values || []
list.map(o => keyBooleanMap[o] = values.indexOf(o) >= 0)
return keyBooleanMap
},
// Collect the keys whose value is true in the map and return them as a list
keyBooleanToList(map) {
let list = []
Object.keys(map).map(k => {
if (map[k]) {
list.push(k)
}
})
return list
}
}
export const DomUtil = {
// Dynamically set style content
setStyleContent(id, css) {
let style = document.getElementById(id)
if (!style) {
style = document.createElement('style')
style.type = 'text/css'
style.id = id
document.getElementsByTagName('head')[0].appendChild(style)
style = document.getElementById(id)
}
style.innerHTML = css
},
// Dynamically load JS
loadScript(url, cb) {
let id = 's_' + md5(url)
let script = document.getElementById(id)
if (script) {
cb && cb({isNew: false})
return
}
script = document.createElement('script')
script.id = id
script.src = url
script.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(script)
},
loadScripts(urls, cb) {
let loads = {};
for (let url of urls) {
loads[url] = null
DomUtil.loadScript(url, data => {
loads[url] = data
})
}
let watch = () => {
for (let o in loads) {
if (!loads[o]) {
setTimeout(() => {
watch()
}, 100)
return
}
}
cb && cb()
}
setTimeout(() => {
watch()
}, 100)
},
// Dynamically load CSS
loadStylesheet(url, cb) {
let id = 's_' + md5(url)
let link = document.getElementById(id)
if (link) {
cb && cb({isNew: false})
return
}
link = document.createElement('link')
link.id = id
link.rel = 'stylesheet'
link.type = 'text/css'
link.href = url
link.onload = () => {
cb && cb({isNew: true})
}
document.getElementsByTagName('head')[0].appendChild(link)
}
}
const parser = require('ua-parser-js');
export const AgentUtil = {
isMobile(ua) {
ua = ua || window.navigator.userAgent
const device = parser.setUA(ua).getDevice()
return device.type === 'mobile'
},
isIOS() {
let u = navigator.userAgent;
return !!u.match(/\(i[^;]+;( U;)? CPU.+Mac OS X/)
},
isWX() {
let ua = window.navigator.userAgent.toLowerCase();
return ua.match(/MicroMessenger/i) !== null
}
}
| ode(str) | identifier_name |
fileopentypeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenTypeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenType.
var mFileOpenTypeIntToString = map[int]string{}
// mFileOpenTypeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenType.
var mFileOpenTypeStringToInt = map[string]int{}
// mFileOpenTypeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenType.
// This map is used for case insensitive look ups.
var mFileOpenTypeLwrCaseStringToInt = map[string]int{}
// FileOpenType - In order to open a file, exactly one of the
// following File Open Codes MUST be specified:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
//
// In addition, one of the three previous codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// This type serves as a wrapper for os package constants.
//
// FileOpenType has been adapted to function as an enumeration of valid
// File Open Type values. Since Go does not directly support enumerations,
// the 'FileOpenType' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
type FileOpenType int
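// Usage sketch (illustrative, not part of the original source): the selected
// open type is or'd with zero or more FileOpenMode flags and passed to
// os.OpenFile. "app.log" and permission 0644 are placeholder values.
//
//  flags := FOpenType.TypeWriteOnly().Value() | os.O_CREATE | os.O_APPEND
//  f, err := os.OpenFile("app.log", flags, 0644)
//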
// None - No File Open Type specified
func (fOpenType FileOpenType) TypeNone() FileOpenType { return -1 }
// ReadOnly - File opened for 'Read Only' access
func (fOpenType FileOpenType) TypeReadOnly() FileOpenType { return FileOpenType(os.O_RDONLY) }
// WriteOnly - File opened for 'Write Only' access
func (fOpenType FileOpenType) TypeWriteOnly() FileOpenType { return FileOpenType(os.O_WRONLY) }
// ReadWrite - File opened for 'Read and Write' access
func (fOpenType FileOpenType) TypeReadWrite() FileOpenType { return FileOpenType(os.O_RDWR) }
// IsValid - If the value of the current FileOpenType is 'invalid', | // for this type.
//
func (fOpenType FileOpenType) IsValid() error {
fOpenType.checkInitializeMaps(false)
_, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
ePrefix := "FileOpenType.IsValid() "
return fmt.Errorf(ePrefix+
"Error: Invalid FileOpenType! Current FileOpenType='%v'",
fOpenType)
}
return nil
}
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenType is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters:
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion.
//
// You can prefix the string with "Type" or not.
// Examples: "ReadOnly" or "TypeReadOnly"
// Either string will produce the correct result.
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'readonly' will NOT
// match the enumeration name, 'ReadOnly'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'readonly'
// will match the enumeration name 'ReadOnly'.
//
// ------------------------------------------------------------------------
//
// Return Values:
//
// FileOpenType - Upon successful completion, this method will return a new
// instance of FileOpenType set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage:
//
// t, err := FileOpenType(0).ParseString("ReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("ReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("readonly", false)
//
// In each of the cases shown above, t is now equal to FileOpenType(0).ReadOnly()
//
func (fOpenType FileOpenType) ParseString(
valueString string,
caseSensitive bool) (FileOpenType, error) {
ePrefix := "FileOpenType.ParseString() "
fOpenType.checkInitializeMaps(false)
result := FileOpenType(0)
lenValueStr := len(valueString)
if strings.HasSuffix(valueString, "()") {
valueString = valueString[0 : lenValueStr-2]
lenValueStr -= 2
}
if lenValueStr < 3 {
return result,
fmt.Errorf(ePrefix+
"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
}
var ok bool
var idx int
if caseSensitive {
if !strings.HasPrefix(valueString, "Type") {
valueString = "Type" + valueString
}
idx, ok = mFileOpenTypeStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result = FileOpenType(idx)
} else {
valueString = strings.ToLower(valueString)
if !strings.HasPrefix(valueString, "type") {
valueString = "type" + valueString
}
idx, ok = mFileOpenTypeLwrCaseStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result =
FileOpenType(idx)
}
return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenType'.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenType value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t := FileOpenType(0).TypeReadWrite()
// str := t.String()
// str is now equal to "TypeReadWrite"
//
func (fOpenType FileOpenType) String() string {
fOpenType.checkInitializeMaps(false)
str, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
return ""
}
return str
}
// Value - This is a utility method which is not part of the
// enumerations supported by this type. It returns the numeric
// value of the enumeration associated with the current FileOpenType
// instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenType FileOpenType) Value() int {
return int(fOpenType)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type utilize a series of three map types. These maps are used
// internally to perform 'string to value' or 'value to string' look ups on
// enumerations supported by this type. Each time FileOpenType.String() or
// FileOpenType.ParseString() is called, this method is invoked to determine
// whether these maps have been initialized. If the maps and look up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenType FileOpenType) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenTypeIntToString != nil &&
len(mFileOpenTypeIntToString) > 3 &&
mFileOpenTypeStringToInt != nil &&
len(mFileOpenTypeStringToInt) > 3 &&
mFileOpenTypeLwrCaseStringToInt != nil &&
len(mFileOpenTypeLwrCaseStringToInt) > 3 {
return
}
var t = FileOpenType(0).TypeReadOnly()
mFileOpenTypeIntToString = make(map[int]string, 0)
mFileOpenTypeStringToInt = make(map[string]int, 0)
mFileOpenTypeLwrCaseStringToInt = make(map[string]int, 0)
s := reflect.TypeOf(t)
intZero := 0
r := reflect.TypeOf(intZero)
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenTypeIntToString[x] = f
mFileOpenTypeStringToInt[f] = x
mFileOpenTypeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
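// After initialization (illustrative contents): the three maps hold entries
// such as
//
//  mFileOpenTypeIntToString[os.O_RDONLY] == "TypeReadOnly"
//  mFileOpenTypeStringToInt["TypeReadOnly"] == os.O_RDONLY
//  mFileOpenTypeLwrCaseStringToInt["typereadonly"] == os.O_RDONLY
//
// one triple per enumeration method found via reflection, with the utility
// methods listed above skipped.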
// FOpenType - This public global variable allows
// easy access to the enumerations of the FileOpenType
// using the dot operator.
//
// Example:
//
// FOpenType.TypeReadOnly()
// FOpenType.TypeWriteOnly()
// FOpenType.TypeReadWrite()
//
var FOpenType = FileOpenType(0) | // this method will return an error. If the FileOpenType is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations | random_line_split |
fileopentypeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenTypeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenType.
var mFileOpenTypeIntToString = map[int]string{}
// mFileOpenTypeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenType.
var mFileOpenTypeStringToInt = map[string]int{}
// mFileOpenTypeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenType.
// This map is used for case insensitive look ups.
var mFileOpenTypeLwrCaseStringToInt = map[string]int{}
// FileOpenType - In order to open a file, exactly one of the
// following File Open Codes MUST be specified:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
//
// In addition, one of the three previous codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// This type serves as a wrapper for os package constants.
//
// FileOpenType has been adapted to function as an enumeration of valid
// File Open Type values. Since Go does not directly support enumerations,
// the 'FileOpenType' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
type FileOpenType int
// None - No File Open Type specified
func (fOpenType FileOpenType) TypeNone() FileOpenType { return -1 }
// ReadOnly - File opened for 'Read Only' access
func (fOpenType FileOpenType) TypeReadOnly() FileOpenType { return FileOpenType(os.O_RDONLY) }
// WriteOnly - File opened for 'Write Only' access
func (fOpenType FileOpenType) TypeWriteOnly() FileOpenType { return FileOpenType(os.O_WRONLY) }
// ReadWrite - File opened for 'Read and Write' access
func (fOpenType FileOpenType) TypeReadWrite() FileOpenType { return FileOpenType(os.O_RDWR) }
// IsValid - If the value of the current FileOpenType is 'invalid',
// this method will return an error. If the FileOpenType is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
func (fOpenType FileOpenType) IsValid() error {
fOpenType.checkInitializeMaps(false)
_, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
ePrefix := "FileOpenType.IsValid() "
return fmt.Errorf(ePrefix+
"Error: Invalid FileOpenType! Current FileOpenType='%v'",
fOpenType)
}
return nil
}
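// Usage sketch (illustrative):
//
//  if err := FOpenType.TypeReadWrite().IsValid(); err != nil {
//    // handle the invalid open type
//  }
//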
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenType is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters:
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion.
//
// You can prefix the string with "Type" or not.
// Examples: "ReadOnly" or "TypeReadOnly"
// Either string will produce the correct result.
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'readonly' will NOT
// match the enumeration name, 'ReadOnly'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'readonly'
// will match the enumeration name 'ReadOnly'.
//
// ------------------------------------------------------------------------
//
// Return Values:
//
// FileOpenType - Upon successful completion, this method will return a new
// instance of FileOpenType set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage:
//
// t, err := FileOpenType(0).ParseString("ReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("ReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("readonly", false)
//
// In each of the cases shown above, t is now equal to FileOpenType(0).ReadOnly()
//
func (fOpenType FileOpenType) ParseString(
valueString string,
caseSensitive bool) (FileOpenType, error) {
ePrefix := "FileOpenType.ParseString() "
fOpenType.checkInitializeMaps(false)
result := FileOpenType(0)
lenValueStr := len(valueString)
if strings.HasSuffix(valueString, "()") {
valueString = valueString[0 : lenValueStr-2]
lenValueStr -= 2
}
if lenValueStr < 3 |
var ok bool
var idx int
if caseSensitive {
if !strings.HasPrefix(valueString, "Type") {
valueString = "Type" + valueString
}
idx, ok = mFileOpenTypeStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result = FileOpenType(idx)
} else {
valueString = strings.ToLower(valueString)
if !strings.HasPrefix(valueString, "type") {
valueString = "type" + valueString
}
idx, ok = mFileOpenTypeLwrCaseStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result =
FileOpenType(idx)
}
return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenType'.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenType value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t := FileOpenType(0).TypeReadWrite()
// str := t.String()
// str is now equal to "TypeReadWrite"
//
func (fOpenType FileOpenType) String() string {
fOpenType.checkInitializeMaps(false)
str, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
return ""
}
return str
}
// Value - This is a utility method which is not part of the
// enumerations supported by this type. It returns the numeric
// value of the enumeration associated with the current FileOpenType
// instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenType FileOpenType) Value() int {
return int(fOpenType)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type utilize a series of three map types. These maps are used
// internally to perform 'string to value' or 'value to string' look ups on
// enumerations supported by this type. Each time FileOpenType.String() or
// FileOpenType.ParseString() is called, this method is invoked to determine
// whether these maps have been initialized. If the maps and look up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenType FileOpenType) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenTypeIntToString != nil &&
len(mFileOpenTypeIntToString) > 3 &&
mFileOpenTypeStringToInt != nil &&
len(mFileOpenTypeStringToInt) > 3 &&
mFileOpenTypeLwrCaseStringToInt != nil &&
len(mFileOpenTypeLwrCaseStringToInt) > 3 {
return
}
var t = FileOpenType(0).TypeReadOnly()
mFileOpenTypeIntToString = make(map[int]string, 0)
mFileOpenTypeStringToInt = make(map[string]int, 0)
mFileOpenTypeLwrCaseStringToInt = make(map[string]int, 0)
s := reflect.TypeOf(t)
intZero := 0
r := reflect.TypeOf(intZero)
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenTypeIntToString[x] = f
mFileOpenTypeStringToInt[f] = x
mFileOpenTypeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
// FOpenType - This public global variable allows
// easy access to the enumerations of the FileOpenType
// using the dot operator.
//
// Example:
//
// FOpenType.TypeReadOnly()
// FOpenType.TypeWriteOnly()
// FOpenType.TypeReadWrite()
//
var FOpenType = FileOpenType(0)
| {
return result,
fmt.Errorf(ePrefix+
"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
} | conditional_block |
fileopentypeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenTypeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenType.
var mFileOpenTypeIntToString = map[int]string{}
// mFileOpenTypeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenType.
var mFileOpenTypeStringToInt = map[string]int{}
// mFileOpenTypeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenType.
// This map is used for case insensitive look ups.
var mFileOpenTypeLwrCaseStringToInt = map[string]int{}
// FileOpenType - In order to open a file, exactly one of the
// following File Open Codes MUST be specified:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
//
// In addition, one of the three previous codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// This type serves as a wrapper for os package constants.
//
// FileOpenType has been adapted to function as an enumeration of valid
// File Open Type values. Since Go does not directly support enumerations,
// the 'FileOpenType' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
type FileOpenType int
// None - No File Open Type specified
func (fOpenType FileOpenType) TypeNone() FileOpenType |
// ReadOnly - File opened for 'Read Only' access
func (fOpenType FileOpenType) TypeReadOnly() FileOpenType { return FileOpenType(os.O_RDONLY) }
// WriteOnly - File opened for 'Write Only' access
func (fOpenType FileOpenType) TypeWriteOnly() FileOpenType { return FileOpenType(os.O_WRONLY) }
// ReadWrite - File opened for 'Read and Write' access
func (fOpenType FileOpenType) TypeReadWrite() FileOpenType { return FileOpenType(os.O_RDWR) }
// IsValid - If the value of the current FileOpenType is 'invalid',
// this method will return an error. If the FileOpenType is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
func (fOpenType FileOpenType) IsValid() error {
fOpenType.checkInitializeMaps(false)
_, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
ePrefix := "FileOpenType.IsValid() "
return fmt.Errorf(ePrefix+
"Error: Invalid FileOpenType! Current FileOpenType='%v'",
fOpenType)
}
return nil
}
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenType is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters:
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion.
//
// You can prefix the string with "Type" or not.
// Examples: "ReadOnly" or "TypeReadOnly"
// Either string will produce the correct result.
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'readonly' will NOT
// match the enumeration name, 'ReadOnly'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'readonly'
// will match the enumeration name 'ReadOnly'.
//
// ------------------------------------------------------------------------
//
// Return Values:
//
// FileOpenType - Upon successful completion, this method will return a new
// instance of FileOpenType set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage:
//
// t, err := FileOpenType(0).ParseString("ReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("ReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("readonly", false)
//
// In each of the cases shown above, t is now equal to FileOpenType(0).ReadOnly()
//
func (fOpenType FileOpenType) ParseString(
valueString string,
caseSensitive bool) (FileOpenType, error) {
ePrefix := "FileOpenType.ParseString() "
fOpenType.checkInitializeMaps(false)
result := FileOpenType(0)
lenValueStr := len(valueString)
if strings.HasSuffix(valueString, "()") {
valueString = valueString[0 : lenValueStr-2]
lenValueStr -= 2
}
if lenValueStr < 3 {
return result,
fmt.Errorf(ePrefix+
"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
}
var ok bool
var idx int
if caseSensitive {
if !strings.HasPrefix(valueString, "Type") {
valueString = "Type" + valueString
}
idx, ok = mFileOpenTypeStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result = FileOpenType(idx)
} else {
valueString = strings.ToLower(valueString)
if !strings.HasPrefix(valueString, "type") {
valueString = "type" + valueString
}
idx, ok = mFileOpenTypeLwrCaseStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result =
FileOpenType(idx)
}
return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenType'.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenType value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t := FileOpenType(0).TypeReadWrite()
// str := t.String()
// str is now equal to "TypeReadWrite"
//
func (fOpenType FileOpenType) String() string {
fOpenType.checkInitializeMaps(false)
str, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
return ""
}
return str
}
// Value - This is a utility method which is not part of the
// enumerations supported by this type. It returns the numeric
// value of the enumeration associated with the current FileOpenType
// instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenType FileOpenType) Value() int {
return int(fOpenType)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type utilize a series of three map types. These maps are used
// internally to perform 'string to value' or 'value to string' look ups on
// enumerations supported by this type. Each time FileOpenType.String() or
// FileOpenType.ParseString() is called, this method is invoked to determine
// whether these maps have been initialized. If the maps and look up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenType FileOpenType) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenTypeIntToString != nil &&
len(mFileOpenTypeIntToString) > 3 &&
mFileOpenTypeStringToInt != nil &&
len(mFileOpenTypeStringToInt) > 3 &&
mFileOpenTypeLwrCaseStringToInt != nil &&
len(mFileOpenTypeLwrCaseStringToInt) > 3 {
return
}
var t = FileOpenType(0).TypeReadOnly()
mFileOpenTypeIntToString = make(map[int]string, 0)
mFileOpenTypeStringToInt = make(map[string]int, 0)
mFileOpenTypeLwrCaseStringToInt = make(map[string]int, 0)
s := reflect.TypeOf(t)
intZero := 0
r := reflect.TypeOf(intZero)
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenTypeIntToString[x] = f
mFileOpenTypeStringToInt[f] = x
mFileOpenTypeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
// FOpenType - This public global variable allows
// easy access to the enumerations of the FileOpenType
// using the dot operator.
//
// Example:
//
// FOpenType.TypeReadOnly()
// FOpenType.TypeWriteOnly()
// FOpenType.TypeReadWrite()
//
var FOpenType = FileOpenType(0)
| { return -1 } | identifier_body |
fileopentypeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenTypeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenType.
var mFileOpenTypeIntToString = map[int]string{}
// mFileOpenTypeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenType.
var mFileOpenTypeStringToInt = map[string]int{}
// mFileOpenTypeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenType.
// This map is used for case insensitive look ups.
var mFileOpenTypeLwrCaseStringToInt = map[string]int{}
// FileOpenType - In order to open a file, exactly one of the
// following File Open Codes MUST be specified:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
//
// In addition, one of the three previous codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// This type serves as a wrapper for os package constants.
//
// FileOpenType has been adapted to function as an enumeration of valid
// File Open Type values. Since Go does not directly support enumerations,
// the 'FileOpenType' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
type FileOpenType int
// None - No File Open Type specified
func (fOpenType FileOpenType) TypeNone() FileOpenType { return -1 }
// ReadOnly - File opened for 'Read Only' access
func (fOpenType FileOpenType) | () FileOpenType { return FileOpenType(os.O_RDONLY) }
// WriteOnly - File opened for 'Write Only' access
func (fOpenType FileOpenType) TypeWriteOnly() FileOpenType { return FileOpenType(os.O_WRONLY) }
// ReadWrite - File opened for 'Read and Write' access
func (fOpenType FileOpenType) TypeReadWrite() FileOpenType { return FileOpenType(os.O_RDWR) }
// IsValid - If the value of the current FileOpenType is 'invalid',
// this method will return an error. If the FileOpenType is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
func (fOpenType FileOpenType) IsValid() error {
fOpenType.checkInitializeMaps(false)
_, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
ePrefix := "FileOpenType.IsValid() "
return fmt.Errorf(ePrefix+
"Error: Invalid FileOpenType! Current FileOpenType='%v'",
fOpenType)
}
return nil
}
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenType is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters:
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion.
//
// You can prefix the string with "Type" or not.
// Examples: "ReadOnly" or "TypeReadOnly"
// Either string will produce the correct result.
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'readonly' will NOT
// match the enumeration name, 'ReadOnly'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'readonly'
// will match the enumeration name 'ReadOnly'.
//
// ------------------------------------------------------------------------
//
// Return Values:
//
// FileOpenType - Upon successful completion, this method will return a new
// instance of FileOpenType set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage:
//
// t, err := FileOpenType(0).ParseString("ReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly", true)
// Or
// t, err := FileOpenType(0).ParseString("TypeReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("ReadOnly()", true)
// Or
// t, err := FileOpenType(0).ParseString("readonly", false)
//
// In each of the cases shown above, t is now equal to FileOpenType(0).ReadOnly()
//
func (fOpenType FileOpenType) ParseString(
valueString string,
caseSensitive bool) (FileOpenType, error) {
ePrefix := "FileOpenType.ParseString() "
fOpenType.checkInitializeMaps(false)
result := FileOpenType(0)
lenValueStr := len(valueString)
if strings.HasSuffix(valueString, "()") {
valueString = valueString[0 : lenValueStr-2]
lenValueStr -= 2
}
if lenValueStr < 3 {
return result,
fmt.Errorf(ePrefix+
"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
}
var ok bool
var idx int
if caseSensitive {
if !strings.HasPrefix(valueString, "Type") {
valueString = "Type" + valueString
}
idx, ok = mFileOpenTypeStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result = FileOpenType(idx)
} else {
valueString = strings.ToLower(valueString)
if !strings.HasPrefix(valueString, "type") {
valueString = "type" + valueString
}
idx, ok = mFileOpenTypeLwrCaseStringToInt[valueString]
if !ok {
return FileOpenType(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenType. valueString='%v' ", valueString)
}
result =
FileOpenType(idx)
}
return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenType'.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenType value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t := FileOpenType(0).TypeReadWrite()
// str := t.String()
// str is now equal to "TypeReadWrite"
//
func (fOpenType FileOpenType) String() string {
fOpenType.checkInitializeMaps(false)
str, ok := mFileOpenTypeIntToString[int(fOpenType)]
if !ok {
return ""
}
return str
}
// Value - Returns the numeric value of the enumeration associated
// with the current FileOpenType instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenType FileOpenType) Value() int {
return int(fOpenType)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type rely on three internal maps. These maps are used
// internally to perform 'string to value' or 'value to string' look-ups on
// enumerations supported by this type. Each time FileOpenType.String() or
// FileOpenType.ParseString() is called, a call is made to this method to determine
// whether these maps have been initialized. If the maps and look-up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenType FileOpenType) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenTypeIntToString != nil &&
len(mFileOpenTypeIntToString) > 3 &&
mFileOpenTypeStringToInt != nil &&
len(mFileOpenTypeStringToInt) > 3 &&
mFileOpenTypeLwrCaseStringToInt != nil &&
len(mFileOpenTypeLwrCaseStringToInt) > 3 {
return
}
var t = FileOpenType(0).TypeReadOnly()
mFileOpenTypeIntToString = make(map[int]string)
mFileOpenTypeStringToInt = make(map[string]int)
mFileOpenTypeLwrCaseStringToInt = make(map[string]int)
s := reflect.TypeOf(t)
intZero := 0
r := reflect.TypeOf(intZero)
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenTypeIntToString[x] = f
mFileOpenTypeStringToInt[f] = x
mFileOpenTypeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
// FOpenType - This public global variable allows
// easy access to the enumerations of the FileOpenType
// using the dot operator.
//
// Example:
//
// FOpenType.TypeReadOnly()
// FOpenType.TypeWriteOnly()
// FOpenType.TypeReadWrite()
//
var FOpenType = FileOpenType(0)
| TypeReadOnly | identifier_name |
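The ParseString/String/checkInitializeMaps comments above all revolve around one idea: three lookup maps (value to name, name to value, lower-cased name to value) kept in sync. The reflection-based initializer can obscure that, so here is a minimal, self-contained Go sketch of the same pattern with the maps written out by hand; the numeric values 1-3 are illustrative, not the package's real constants.

package main

import (
	"fmt"
	"strings"
)

type FileOpenType int

// The three maps that back String() and parse(); filled in by hand
// here instead of via reflection.
var (
	intToString = map[int]string{1: "TypeReadOnly", 2: "TypeWriteOnly", 3: "TypeReadWrite"}
	stringToInt = map[string]int{"TypeReadOnly": 1, "TypeWriteOnly": 2, "TypeReadWrite": 3}
	lowerToInt  = map[string]int{"typereadonly": 1, "typewriteonly": 2, "typereadwrite": 3}
)

func (t FileOpenType) String() string { return intToString[int(t)] }

// parse mirrors the documented ParseString contract: an optional "()" suffix
// and an optional "Type" prefix, with a case-insensitive mode.
func parse(s string, caseSensitive bool) (FileOpenType, error) {
	s = strings.TrimSuffix(s, "()")
	if caseSensitive {
		if !strings.HasPrefix(s, "Type") {
			s = "Type" + s
		}
		if v, ok := stringToInt[s]; ok {
			return FileOpenType(v), nil
		}
	} else {
		s = strings.ToLower(s)
		if !strings.HasPrefix(s, "type") {
			s = "type" + s
		}
		if v, ok := lowerToInt[s]; ok {
			return FileOpenType(v), nil
		}
	}
	return 0, fmt.Errorf("%q did not match a FileOpenType", s)
}

func main() {
	t, err := parse("readonly", false)
	fmt.Println(t, err) // TypeReadOnly <nil>
}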
activeTopicPage.js | /** Spend-and-save ("满立减") promotion topic page
* Created by xiuxiu on 2016/4/11.
*/
require([
'jquery',
'h5/js/common/data',
'h5/js/common',
'h5/js/common/loadImage',
'h5/js/common/nexter',
'h5/js/common/goods',
'h5/js/common/cart',
'h5/js/common/weixin',
'h5/js/common/url',
'h5/js/common/banner',
'h5/js/common/transDialog'
], function ($, Data, Common,LoadImage, Nexter, Goods, Cart, WeiXin, URL,Banner,Dialog) {
var Page,
CateTabs,
dialog,
Cates = Storage.Cates.get(),
isApp = URL.param.isApp,
topicId = URL.param.topicId,
parameter = '',
firstLoad = true;
function init() {
render();
//Common.to_Top('#listPage'); // back-to-top button
}
function render() {
Page = $('<div id="listPage"> <section id="_listPage"><div class="scroll-content"><div class="banner pic-carousel" id="banner" data-banner="418"><img src=""/> </div><div class="page-list-tabs"></div></div></section></div>').appendTo('body');
Common.headerHtml('','<div class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>',false,'#listPage');
bindEvents();
renderCate();
}
function bindEvents() {
Page.on('tap', '.icon-return', function (event) {
event.preventDefault();
Common.returnPrePage();
}).on('tap', '.add-cart', function (event) {
event.preventDefault();
var id=$(this).parents('.goods').data('id'),
price = parseFloat($(this).parents('.goods-info').find('.price').text());
if(!isApp){
URL.assign(decodeURI('http://test.unesmall.com/api/h/1.0/activeTopicPage.htm?topicId='+topicId+'&id='+id+'&price='+price+'&parameter='+parameter));
addCart($(this),true);
}else{
addCart($(this));
}
}).on('transitionend', '.adding', function(event) {
$(this).removeClass('adding');
}).on('tap', '[href]', function (event) {
event.preventDefault();
Common.addPUserId($(this));
})//.on('tap', '.navbar-left span', function (event) {
// event.preventDefault();
// $(this).removeAttr('href');
// URL.assign(URL.index);
//})//.on('swipeUp', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// var $this = $(this);
// //setTimeout(function() {
// // $('.navTit', Page).removeClass('showNav');
// //}, 0);
//
// var nav = $('.navTit', Page);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top > 0) {
// nav.removeClass('showNav');
// }
//}).on('swipeDown', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// setTimeout(function() {
// $('.navTit', Page).addClass('showNav');
// }, 0);
// var nav = $('.navTit', Page);
// var $this = $(this);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top < 0) {
// nav.removeClass('showNav');
// }
//})
}
function renderCate() {
var wrapTemplate = '<div class="page-tabs-panel" data-role-scrollable="true"><ul class="goods-list grid"></ul></div>';
var content = $('.page-list-tabs', Page).html(wrapTemplate);
var data = {
topicId:topicId
}
if ($('.page-tabs-panel').offset().top < $(window).height()*1.5 && firstLoad) {
renderCateNexter(data);
firstLoad = false;
}
if(!isApp){
$('.header').hide();
$('.header .category-handle').remove();
$('#_listPage').append('<div style="opacity: 0;z-index: -1" class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>');
$('.banner').css('padding-top',0)
}
}
function renderCateNexter(data) {
var element = $('#_listPage');
var nexter = new Nexter({
element: element,
dataSource: Data.fetchItems,
enableScrollLoad: true,
scrollBodyContent: $('.page-tabs-panel ul'),
data: data,
//pageSize:16
}).load().on('load:success', function(res) {
//console.log(res);
var html = htmlItems(res.items);
$('#banner img').attr('src',res.topicDO.picUrls);
$('.navbar-main').text(res.topicDO.name);
document.title = res.topicDO.name
parameter = res.topicDO.parameter;
element.show();
//element.show().css('height','690px');
if (html.length) {
this.$('.goods-list').append(html.join(''));
LoadImage(this.element);
//refreshCart();
} else if (this.get('pageIndex') == 0) {
this.$('.goods-list').html('<li class="not-has-goods-msg"><img src="'+URL.imgPath+'/common/images/loading_fail.png"/><p>灰常抱歉,暂时没有数据哦</p> </li>');
}
}).render();
var sid,
scrollEventHandle = function(event) {
event.preventDefault();
clearTimeout(sid);
sid = setTimeout(function() {
LoadImage(element);
}, 0);
}
element.on('scroll', scrollEventHandle);
}
function htmlItems(items) {
var template = '<div class="goods col col-50" data-id="{{itemId}}"><div class="listimg" href="' + URL.goodsDetail + '?gid={{itemId}}" tj_category="商品" tj_action="{{title}}"><img data-lazyload-src="{{listimg}}" />{{soldOut}}{{_htmlFlagS}}</div><div class="goods-info"><h1><span class="title">{{title}}</span><span class="spec">{{_htmlLimit}}</span><span class="no-post-fee">{{noPostFee}}</span></h1><p><span class="price">{{_htmlPrice}}</span><span class="refprice">{{_htmlRelPrice}}</span><span class="discount {{discountClass}}">{{discount}}折</span></p><p class="brokeFee {{brokerageFeehide}}">推广费:<span class="price">{{brokerageFee}}</span></p><div class="soldnum"><div style="width:{{threshold}}%"></div><p>已售{{saleTotal}}件</p></div><a class="add-cart {{soldOutCartState}}"></a><div class="count">{{cartCount}}</div></div>{{_htmlFlag}}</div>',
html = [],
_index = 0;
if ($.isArray(items) && items.length) {
$.each(items, function(index, item) {
//console.log('list',item.brokerageFee,item.itemId);
if (index % 2 == 0) {
html.push('<li class="row">');
}
var goods = Goods.create(item);
if (item.isTaxFree == 1) { goods._htmlFlag = '<div class="goods-flag"><img class="flag-taxFree" src="'+URL.imgPath+'common/images/icon_label_dutyFree.png" /></div>'; }
html.push(bainx.tpl(template, goods));
if (index % 2 == 1) {
html.push('</li>');
}
_index = index;
});
if (_index % 2 == 0) {
html.push('<div class="col col-50 goods goods-null fb fvc fac"></div></li>');
}
}
refreshCart();
return html;
}
function addCart(btn,_isapp) {
var view = btn.parents('.goods'),
gid = view.data('id'),
item = Cart.query(gid) || Cart.create(Goods.query(gid));
if (item) {
item.add({
btn: btn,
start: function(newCount) {
btn.addClass('adding');
view.find('.count').text(newCount);
var offset = $(".category-handle").offset(),
cloneViewOffset = $('.listimg', view).offset(),
imgUrl = view.find('.listimg').children('img').attr('src'), // URL of the tapped item's image
flyerWidth = $('.listimg img', view).width(),
//flyerHeight = $('.listimg img', view).height(),
flyer = $('<div class="listimg move-wrap"><img class="flyer-img" src="' + imgUrl + '"></div>'); // the "flyer" element animated along a parabola
flyer.find('.flyer-img').width(flyerWidth);
//flyer.find('.flyer-img').height(flyerHeight);
console.log(offset);
flyer.fly({
start: {
left: cloneViewOffset.left, // parabola start, x
top: cloneViewOffset.top // parabola start, y
},
end: {
left: offset.left + 10, // parabola end, x
top: offset.top + 10, // parabola end, y
width: 10,
height: 10
},
speed: 1.2,
vertex_Rtop: 50, // top offset of the trajectory's apex; defaults to 20
onEnd: function () {
//$("#tip").show().animate({width: '200px'},300).fadeOut(500); // "added to cart" success animation
this.destory(); // destroy the flyer (the plugin's API really is spelled "destory")
}
});
},
count: 1,
view: view,
},_isapp).always(function(res) {
item.count > 0 ? view.find('.count').text(item.count).css({visibility: "visible"}) : view.find('.count').css({visibility: "hidden"});
refreshCart();
}).fail(function(json) {
if (item.goods.flag.immediatelyBuy) {
URL.assign(URL.goodsDetail + '?gid=' + item.id);
} else {
alert(json && json.msg || '同步购物车失败或者您尚未登陆!');
if (!item.goods.itemNum) {
Cart.removeItem(item);
}
}
});
}
}
function refreshCart() {
Common.getCartCount();
Common.isLogin && Cart.ready(function() {
// renderSmallCart();
// refreshListCount();
});
}
init();
}) | identifier_body | ||
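A note on the '&parameter' fix above: the corrupted '¶meter' is what '&para' looks like after a round of HTML entity decoding, a classic hazard of building query strings by concatenation. Where escaping matters, letting a URL library encode the query avoids that whole class of bug. A small Go sketch of the idea (the endpoint is the one used by the handler above; the parameter values are illustrative):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("http://test.unesmall.com/api/h/1.0/activeTopicPage.htm")
	if err != nil {
		panic(err)
	}
	q := url.Values{}
	q.Set("topicId", "418") // illustrative values
	q.Set("id", "10086")
	q.Set("price", "19.9")
	q.Set("parameter", "promo")
	u.RawQuery = q.Encode() // proper escaping; "&parameter" can never decay into "¶meter"
	fmt.Println(u.String())
}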
activeTopicPage.js | /** Spend-and-save ("满立减") promotion topic page
* Created by xiuxiu on 2016/4/11.
*/
require([
'jquery',
'h5/js/common/data',
'h5/js/common',
'h5/js/common/loadImage',
'h5/js/common/nexter',
'h5/js/common/goods',
'h5/js/common/cart',
'h5/js/common/weixin',
'h5/js/common/url',
'h5/js/common/banner',
'h5/js/common/transDialog'
], function ($, Data, Common,LoadImage, Nexter, Goods, Cart, WeiXin, URL,Banner,Dialog) {
var Page,
CateTabs,
dialog,
Cates = Storage.Cates.get(),
isApp = URL.param.isApp,
topicId = URL.param.topicId,
parameter = '',
firstLoad = true;
function init() {
render();
//Common.to_Top('#listPage'); // back-to-top button
}
function render() {
Page = $('<div id="listPage"> <section id="_listPage"><div class="scroll-content"><div class="banner pic-carousel" id="banner" data-banner="418"><img src=""/> </div><div class="page-list-tabs"></div></div></section></div>').appendTo('body');
Common.headerHtml('','<div class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>',false,'#listPage');
bindEvents();
renderCate();
}
function bindEvents() {
Page.on('tap', '.icon-return', function (event) {
event.preventDefault();
Common.returnPrePage();
}).on('tap', '.add-cart', function (event) {
event.preventDefault();
var id=$(this).parents('.goods').data('id'),
price = parseFloat($(this).parents('.goods-info').find('.price').text());
if(!isApp){
URL.assign(decodeURI('http://test.unesmall.com/api/h/1.0/activeTopicPage.htm?topicId='+topicId+'&id='+id+'&price='+price+'&parameter='+parameter));
addCart($(this),true);
}else{
addCart($(this));
}
}).on('transitionend', '.adding', function(event) {
$(this).removeClass('adding');
}).on('tap', '[href]', function (event) {
event.preventDefault();
Common.addPUserId($(this));
})//.on('tap', '.navbar-left span', function (event) {
// event.preventDefault();
// $(this).removeAttr('href');
// URL.assign(URL.index);
//})//.on('swipeUp', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// var $this = $(this);
// //setTimeout(function() {
// // $('.navTit', Page).removeClass('showNav');
// //}, 0);
//
// var nav = $('.navTit', Page);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top > 0) {
// nav.removeClass('showNav');
// }
//}).on('swipeDown', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// setTimeout(function() {
// $('.navTit', Page).addClass('showNav');
// }, 0);
// var nav = $('.navTit', Page);
// var $this = $(this);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top < 0) {
// nav.removeClass('showNav');
// }
//})
}
function renderCate() {
var wrapTemplate = '<div class="page-tabs-panel" data-role-scrollable="true"><ul class="goods-list grid"></ul></div>';
var content = $('.page-list-tabs', Page).html(wrapTemplate);
var data = {
topicId:topicId
}
if ($('.page-tabs-panel').offset().top < $(window).height()*1.5 && firstLoad) {
renderCateNexter(data);
firstLoad = false;
}
if(!isApp){
$('.header').hide();
$('.header .category-handle').remove();
$('#_listPage').append('<div style="opacity: 0;z-index: -1" class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>');
$('.banner').css('padding-top',0)
}
}
function renderCateNexter(data) {
var element = $('#_listPage');
var nexter = new Nexter({
element: element,
dataSource: Data.fetchItems,
enableScrollLoad: true,
scrollBodyContent: $('.page-tabs-panel ul'),
data: data,
//pageSize:16
}).load().on('load:success', function(res) {
//console.log(res);
var html = htmlItems(res.items);
$('#banner img').attr('src',res.topicDO.picUrls);
$('.navbar-main').text(res.topicDO.name);
document.title = res.topicDO.name
parameter = res.topicDO.parameter;
element.show();
//element.show().css('height','690px');
if (html.length) {
this.$('.goods-list').append(html.join(''));
LoadImage(this.element);
//refreshCart();
} else if (this.get('pageIndex') == 0) {
this.$('.goods-list').html('<li class="not-has-goods-msg"><img src="'+URL.imgPath+'/common/images/loading_fail.png"/><p>灰常抱歉,暂时没有数据哦</p> </li>');
}
}).render();
var sid,
scrollEventHandle = function(event) {
event.preventDefault();
clearTimeout(sid);
sid = setTimeout(function() {
LoadImage(element);
}, 0);
}
element.on('scroll', scrollEventHandle);
}
function htmlItems(items) {
var template = '<div class="goods col col-50" data-id="{{itemId}}"><div class="listimg" href="' + URL.goodsDetail + '?gid={{itemId}}" tj_category="商品" tj_action="{{title}}"><img data-lazyload-src="{{listimg}}" />{{soldOut}}{{_htmlFlagS}}</div><div class="goods-info"><h1><span class="title">{{title}}</span><span class="spec">{{_htmlLimit}}</span><span class="no-post-fee">{{noPostFee}}</span></h1><p><span class="price">{{_htmlPrice}}</span><span class="refprice">{{_htmlRelPrice}}</span><span class="discount {{discountClass}}">{{discount}}折</span></p><p class="brokeFee {{brokerageFeehide}}">推广费:<span class="price">{{brokerageFee}}</span></p><div class="soldnum"><div style="width:{{threshold}}%"></div><p>已售{{saleTotal}}件</p></div><a class="add-cart {{soldOutCartState}}"></a><div class="count">{{cartCount}}</div></div>{{_htmlFlag}}</div>',
html = [],
_index = 0;
if ($.isArray(items) && items.length) {
$.each(items, function(index, item) {
//console.log('list',item.brokerageFee,item.itemId);
if (index % 2 == 0) {
html.push('<li class="row">');
}
var goods = Goods.create(item);
if (item.isTaxFree == 1) { goods._htmlFlag = '<div class="goods-flag"><img class="flag-taxFree" src="'+URL.imgPath+'common/images/icon_label_dutyFree.png" /></div>'; }
html.push(bainx.tpl(template, goods));
if (index % 2 == 1) {
html.push('</li>');
}
_index = index;
});
if (_index % 2 == 0) {
html.push('<div class="col col-50 goods goods-null fb fvc fac"></div></li>');
}
}
refreshCart();
return html;
}
function addCart(btn,_isapp) {
var view = btn.parents('.goo | gid = view.data('id'),
item = Cart.query(gid) || Cart.create(Goods.query(gid));
if (item) {
item.add({
btn: btn,
start: function(newCount) {
btn.addClass('adding');
view.find('.count').text(newCount);
var offset = $(".category-handle").offset(),
cloneViewOffset = $('.listimg', view).offset(),
imgUrl = view.find('.listimg').children('img').attr('src'), // URL of the tapped item's image
flyerWidth = $('.listimg img', view).width(),
//flyerHeight = $('.listimg img', view).height(),
flyer = $('<div class="listimg move-wrap"><img class="flyer-img" src="' + imgUrl + '"></div>'); // the "flyer" element animated along a parabola
flyer.find('.flyer-img').width(flyerWidth);
//flyer.find('.flyer-img').height(flyerHeight);
console.log(offset);
flyer.fly({
start: {
left: cloneViewOffset.left, // parabola start, x
top: cloneViewOffset.top // parabola start, y
},
end: {
left: offset.left + 10, // parabola end, x
top: offset.top + 10, // parabola end, y
width: 10,
height: 10
},
speed: 1.2,
vertex_Rtop: 50, // top offset of the trajectory's apex; defaults to 20
onEnd: function () {
//$("#tip").show().animate({width: '200px'},300).fadeOut(500); // "added to cart" success animation
this.destory(); // destroy the flyer (the plugin's API really is spelled "destory")
}
});
},
count: 1,
view: view,
},_isapp).always(function(res) {
item.count > 0 ? view.find('.count').text(item.count).css({visibility: "visible"}) : view.find('.count').css({visibility: "hidden"});
refreshCart();
}).fail(function(json) {
if (item.goods.flag.immediatelyBuy) {
URL.assign(URL.goodsDetail + '?gid=' + item.id);
} else {
alert(json && json.msg || '同步购物车失败或者您尚未登陆!');
if (!item.goods.itemNum) {
Cart.removeItem(item);
}
}
});
}
}
function refreshCart() {
Common.getCartCount();
Common.isLogin && Cart.ready(function() {
// renderSmallCart();
// refreshListCount();
});
}
init();
}) | ds'),
| identifier_name |
activeTopicPage.js | /** Spend-and-save ("满立减") promotion topic page
* Created by xiuxiu on 2016/4/11.
*/
require([
'jquery',
'h5/js/common/data',
'h5/js/common',
'h5/js/common/loadImage',
'h5/js/common/nexter',
'h5/js/common/goods',
'h5/js/common/cart',
'h5/js/common/weixin',
'h5/js/common/url',
'h5/js/common/banner',
'h5/js/common/transDialog'
], function ($, Data, Common,LoadImage, Nexter, Goods, Cart, WeiXin, URL,Banner,Dialog) {
var Page,
CateTabs,
dialog,
Cates = Storage.Cates.get(),
isApp = URL.param.isApp,
topicId = URL.param.topicId,
parameter = '',
firstLoad = true;
function init() {
render();
//Common.to_Top('#listPage'); // back-to-top button
}
function render() {
Page = $('<div id="listPage"> <section id="_listPage"><div class="scroll-content"><div class="banner pic-carousel" id="banner" data-banner="418"><img src=""/> </div><div class="page-list-tabs"></div></div></section></div>').appendTo('body');
Common.headerHtml('','<div class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>',false,'#listPage');
bindEvents();
renderCate();
}
function bindEvents() {
Page.on('tap', '.icon-return', function (event) {
event.preventDefault();
Common.returnPrePage();
}).on('tap', '.add-cart', function (event) {
event.preventDefault();
var id=$(this).parents('.goods').data('id'),
price = parseFloat($(this).parents('.goods-info').find('.price').text());
if(!isApp){
URL.assign(decodeURI('http://test.unesmall.com/api/h/1.0/activeTopicPage.htm?topicId='+topicId+'&id='+id+'&price='+price+'&parameter='+parameter));
addCart($(this),true);
}else{
addCart($(this));
}
}).on('transitionend', '.adding', function(event) {
$(this).removeClass('adding');
}).on('tap', '[href]', function (event) {
event.preventDefault();
Common.addPUserId($(this));
})//.on('tap', '.navbar-left span', function (event) {
// event.preventDefault();
// $(this).removeAttr('href');
// URL.assign(URL.index);
//})//.on('swipeUp', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// var $this = $(this);
// //setTimeout(function() {
// // $('.navTit', Page).removeClass('showNav');
// //}, 0);
//
// var nav = $('.navTit', Page);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top > 0) {
// nav.removeClass('showNav');
// }
//}).on('swipeDown', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// setTimeout(function() {
// $('.navTit', Page).addClass('showNav');
// }, 0);
// var nav = $('.navTit', Page);
// var $this = $(this);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top < 0) {
// nav.removeClass('showNav');
// }
//})
}
function renderCate() {
var wrapTemplate = '<div class="page-tabs-panel" data-role-scrollable="true"><ul class="goods-list grid"></ul></div>';
var content = $('.page-list-tabs', Page).html(wrapTemplate);
var data = {
topicId:topicId
}
if ($('.page-tabs-panel').offset().top < $(window).height()*1.5 && firstLoad) {
renderCateNexter(data);
firstLoad = false;
}
if(!isApp){
$('.header').hide();
$('.header .category-handle').remove();
$('#_listPage').append('<div style="opacity: 0;z-index: -1" class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>');
$('.banner').css('padding-top',0)
}
}
function renderCateNexter(data) {
var element = $('#_listPage');
var nexter = new Nexter({
element: element,
dataSource: Data.fetchItems,
enableScrollLoad: true,
scrollBodyContent: $('.page-tabs-panel ul'),
data: data,
//pageSize:16
}).load().on('load:success', function(res) {
//console.log(res);
var html = htmlItems(res.items);
$('#banner img').attr('src',res.topicDO.picUrls);
$('.navbar-main').text(res.topicDO.name);
document.title = res.topicDO.name
parameter = res.topicDO.parameter;
element.show();
//element.show().css('height','690px');
if (html.length) {
this.$('.goods-list').append(html.join(''));
LoadImage(this.element);
//refreshCart();
} else if (this.get('pageIndex') == 0) {
this.$('.goods-list').html('<li class="not-has-goods-msg"><img src="'+URL.imgPath+'/common/images/loading_fail.png"/><p>灰常抱歉,暂时没有数据哦</p> </li>');
}
}).render();
var sid,
scrollEventHandle = function(event) {
event.preventDefault();
clearTimeout(sid);
sid = setTimeout(function() {
LoadImage(element);
}, 0);
}
element.on('scroll', scrollEventHandle);
}
function htmlItems(items) {
var template = '<div class="goods col col-50" data-id="{{itemId}}"><div class="listimg" href="' + URL.goodsDetail + '?gid={{itemId}}" tj_category="商品" tj_action="{{title}}"><img data-lazyload-src="{{listimg}}" />{{soldOut}}{{_htmlFlagS}}</div><div class="goods-info"><h1><span class="title">{{title}}</span><span class="spec">{{_htmlLimit}}</span><span class="no-post-fee">{{noPostFee}}</span></h1><p><span class="price">{{_htmlPrice}}</span><span class="refprice">{{_htmlRelPrice}}</span><span class="discount {{discountClass}}">{{discount}}折</span></p><p class="brokeFee {{brokerageFeehide}}">推广费:<span class="price">{{brokerageFee}}</span></p><div class="soldnum"><div style="width:{{threshold}}%"></div><p>已售{{saleTotal}}件</p></div><a class="add-cart {{soldOutCartState}}"></a><div class="count">{{cartCount}}</div></div>{{_htmlFlag}}</div>',
html = [],
_index = 0;
if ($.isArray(items) && items.length) {
$.each(items, function(index, item) {
//console.log('list',item.brokerageFee,item.itemId);
if (index % 2 == 0) {
html.push('<li class="row">');
}
var goods = Goods.create(item);
if (item.isTaxFree == 1) { goods._htmlFlag = '<div class="goods-flag"><img class="flag-taxFree" src="'+URL.imgPath+'common/images/icon_label_dutyFree.png" /></div>'; }
html.push(bainx.tpl(template, goods));
if (index % 2 == 1) {
html.push('</li>');
}
_index = index;
});
if (_index % 2 == 0) {
html.push('<div class="col col-50 goods goods-null fb fvc fac"></div></li>');
}
}
refreshCart();
return html;
}
function addCart(btn,_isapp) {
var view = btn.parents('.goods'),
gid = view.data('id'),
item = Cart.query(gid) || Cart.create(Goods.query(gid));
if (item) {
item.add({
btn: btn,
start: function(newCount) {
btn.addClass('adding');
view.find('.count').text(newCount);
var offset = $(".category-handle").offset(),
cloneViewOffset = $('.listimg', view).offset(),
imgUrl = view.find('.listimg').children('img').attr('src'), // URL of the tapped item's image
flyerWidth = $('.listimg img', view).width(),
//flyerHeight = $('.listimg img', view).height(),
flyer = $('<div class="listimg move-wrap"><img class="flyer-img" src="' + imgUrl + '"></div>'); // the "flyer" element animated along a parabola
flyer.find('.flyer-img').width(flyerWidth);
//flyer.find('.flyer-img').height(flyerHeight);
console.log(offset);
flyer.fly({
start: {
left: cloneViewOffset.left, // parabola start, x
top: cloneViewOffset.top // parabola start, y
},
end: {
left: offset.left + 10, // parabola end, x
top: offset.top + 10, // parabola end, y
width: 10,
height: 10
},
speed: 1.2,
vertex_Rtop: 50, // top offset of the trajectory's apex; defaults to 20
onEnd: function () {
//$("#tip").show().animate({width: '200px'},300).fadeOut(500); // "added to cart" success animation
this.destory(); //销毁抛物体 | view: view,
},_isapp).always(function(res) {
item.count > 0 ? view.find('.count').text(item.count).css({visibility: "visible"}) : view.find('.count').css({visibility: "hidden"});
refreshCart();
}).fail(function(json) {
if (item.goods.flag.immediatelyBuy) {
URL.assign(URL.goodsDetail + '?gid=' + item.id);
} else {
alert(json && json.msg || '同步购物车失败或者您尚未登陆!');
if (!item.goods.itemNum) {
Cart.removeItem(item);
}
}
});
}
}
function refreshCart() {
Common.getCartCount();
Common.isLogin && Cart.ready(function() {
// renderSmallCart();
// refreshListCount();
});
}
init();
}) | }
});
},
count: 1, | random_line_split |
activeTopicPage.js | /** Spend-and-save ("满立减") promotion topic page
* Created by xiuxiu on 2016/4/11.
*/
require([
'jquery',
'h5/js/common/data',
'h5/js/common',
'h5/js/common/loadImage',
'h5/js/common/nexter',
'h5/js/common/goods',
'h5/js/common/cart',
'h5/js/common/weixin',
'h5/js/common/url',
'h5/js/common/banner',
'h5/js/common/transDialog'
], function ($, Data, Common,LoadImage, Nexter, Goods, Cart, WeiXin, URL,Banner,Dialog) {
var Page,
CateTabs,
dialog,
Cates = Storage.Cates.get(),
isApp = URL.param.isApp,
topicId = URL.param.topicId,
parameter = '',
firstLoad = true;
function init() {
render();
//Common.to_Top('#listPage'); // back-to-top button
}
function render() {
Page = $('<div id="listPage"> <section id="_listPage"><div class="scroll-content"><div class="banner pic-carousel" id="banner" data-banner="418"><img src=""/> </div><div class="page-list-tabs"></div></div></section></div>').appendTo('body');
Common.headerHtml('','<div class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>',false,'#listPage');
bindEvents();
renderCate();
}
function bindEvents() {
Page.on('tap', '.icon-return', function (event) {
event.preventDefault();
Common.returnPrePage();
}).on('tap', '.add-cart', function (event) {
event.preventDefault();
var id=$(this).parents('.goods').data('id'),
price = parseFloat($(this).parents('.goods-info').find('.price').text());
if(!isApp){
URL.assign(decodeURI('http://test.unesmall.com/api/h/1.0/activeTopicPage.htm?topicId='+topicId+'&id='+id+'&price='+price+'&parameter='+parameter));
addCart($(this),true);
}else{
addCart($(this));
}
}).on('transitionend', '.adding', function(event) {
$(this).removeClass('adding');
}).on('tap', '[href]', function (event) {
event.preventDefault();
Common.addPUserId($(this));
})//.on('tap', '.navbar-left span', function (event) {
// event.preventDefault();
// $(this).removeAttr('href');
// URL.assign(URL.index);
//})//.on('swipeUp', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// var $this = $(this);
// //setTimeout(function() {
// // $('.navTit', Page).removeClass('showNav');
// //}, 0);
//
// var nav = $('.navTit', Page);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top > 0) {
// nav.removeClass('showNav');
// }
//}).on('swipeDown', '.page-tabs-panel', function(event) {
// console.log(event);
// event.preventDefault();
// setTimeout(function() {
// $('.navTit', Page).addClass('showNav');
// }, 0);
// var nav = $('.navTit', Page);
// var $this = $(this);
// console.log($this.offset().top)
// if (nav.hasClass('showNav') && $this.offset().top < 0) {
// nav.removeClass('showNav');
// }
//})
}
function renderCate() {
var wrapTemplate = '<div class="page-tabs-panel" data-role-scrollable="true"><ul class="goods-list grid"></ul></div>';
var content = $('.page-list-tabs', Page).html(wrapTemplate);
var data = {
topicId:topicId
}
if ($('.page-tabs-panel').offset().top < $(window).height()*1.5 && firstLoad) {
renderCateNexter(data);
firstLoad = false;
}
if(!isApp){
$('.header').hide();
$('.header .category-handle').remove();
$('#_listPage').append('<div style="opacity: 0;z-index: -1" class="category-handle cart-bar" href="' + URL.cart + '"><span class="price"></span></div>');
$('.banner').css('padding-top',0)
}
}
function renderCateNexter(data) {
var element = $('#_listPage');
var nexter = new Nexter({
element: element,
dataSource: Data.fetchItems,
enableScrollLoad: true,
scrollBodyContent: $('.page-tabs-panel ul'),
data: data,
//pageSize:16
}).load().on('load:success', function(res) {
//console.log(res);
var html = htmlItems(res.items);
$('#banner img').attr('src',res.topicDO.picUrls);
$('.navbar-main').text(res.topicDO.name);
document.title = res.topicDO.name
parameter = res.topicDO.parameter;
element.show();
//element.show().css('height','690px');
if (html.length) {
this.$('.goods-list').append(html.join(''));
LoadImage(this.element);
//refreshCart();
} else if (this.get('pageIndex') == 0) {
this.$('.goods-list').html('<li class="not-has-goods-msg"><img src="'+URL.imgPath+'/common/images/loading_fail.png"/><p>灰常抱歉,暂时没有数据哦</p> </li>');
}
}).render();
var sid,
scrollEventHandle = function(event) {
event.preventDefault();
clearTimeout(sid);
sid = setTimeout(function() {
LoadImage(element);
}, 0);
}
element.on('scroll', scrollEventHandle);
}
function htmlItems(items) {
var template = '<div class="goods col col-50" data-id="{{itemId}}"><div class="listimg" href="' + URL.goodsDetail + '?gid={{itemId}}" tj_category="商品" tj_action="{{title}}"><img data-lazyload-src="{{listimg}}" />{{soldOut}}{{_htmlFlagS}}</div><div class="goods-info"><h1><span class="title">{{title}}</span><span class="spec">{{_htmlLimit}}</span><span class="no-post-fee">{{noPostFee}}</span></h1><p><span class="price">{{_htmlPrice}}</span><span class="refprice">{{_htmlRelPrice}}</span><span class="discount {{discountClass}}">{{discount}}折</span></p><p class="brokeFee {{brokerageFeehide}}">推广费:<span class="price">{{brokerageFee}}</span></p><div class="soldnum"><div style="width:{{threshold}}%"></div><p>已售{{saleTotal}}件</p></div><a class="add-cart {{soldOutCartState}}"></a><div class="count">{{cartCount}}</div></div>{{_htmlFlag}}</div>',
html = [],
_index = 0;
if ($.isArray(items) && items.length) {
$.each(items, function(index, item) {
//console.log('list',item.brokerageFee,item.itemId);
if (index % 2 == 0) {
html.push('<li class="row">');
}
var goods = Goods.create(item);
if (item.isTaxFree == 1) { goods._htmlFlag = '<div class="goods-flag"><img class="flag-taxFree" src="'+URL.imgPath+'common/images/icon_label_dutyFree.png" /></div>'; }
html.push(bainx.tpl(template, goods));
if (index % 2 == 1) {
html.push('</li>');
}
_index = index;
});
if (_index % 2 == 0) {
html.push('<div class="col col-50 goods goods-null fb fvc fac"></div></li>');
}
}
refreshCart();
return html;
}
function addCart(btn,_isapp) {
var view = btn.parents('.goods'),
gid = view.data('id'),
item = Cart.query(gid) || Cart.create(Goods.query(gid));
if (item) {
item.add({
btn: btn,
start: function(newCount) {
btn.addClass('adding');
view.find('.count').text(newCount);
var offset = $(".category-handle").offset(),
cloneViewOffset = $('.listimg', view).offset(),
imgUrl = view.find('.listimg').children('img').attr('src'), // URL of the tapped item's image
flyerWidth = $('.listimg img', view).width(),
//flyerHeight = $('.listimg img', view).height(),
flyer = $('<div class="listimg move-wrap"><img class="flyer-img" src="' + imgUrl + '"></div>'); // the "flyer" element animated along a parabola
flyer.find('.flyer-img').width(flyerWidth);
//flyer.find('.flyer-img').height(flyerHeight);
console.log(offset);
flyer.fly({
start: {
left: cloneViewOffset.left, // parabola start, x
top: cloneViewOffset.top // parabola start, y
},
end: {
left: offset.left + 10, // parabola end, x
top: offset.top + 10, // parabola end, y
width: 10,
height: 10
},
speed: 1.2,
vertex_Rtop: 50, // top offset of the trajectory's apex; defaults to 20
onEnd: function () {
//$("#tip").show().animate({width: '200px'},300).fadeOut(500); // "added to cart" success animation
this.destory(); // destroy the flyer (the plugin's API really is spelled "destory")
}
});
},
count: 1,
view: view,
},_isapp).always(function(res) {
item.count > 0 ? view.find('.count').text(item.count).css({visibility: "visible"}) : view.find('.count').css({visibility: "hidden"});
refreshCart();
}).fail(function(json) {
if (item.goods.flag.immediatelyBuy) {
URL.assign(URL.goodsDetail + '?gid=' + item.id);
} else {
alert(json && json.msg || '同步购物车失败或者您尚未登陆!');
if (!item.goods.itemNum) {
Cart.removeItem(item);
}
}
| init();
}) | });
}
}
function refreshCart() {
Common.getCartCount();
Common.isLogin && Cart.ready(function() {
// renderSmallCart();
// refreshListCount();
});
}
| conditional_block |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
)
var (
Token string
Color = 0x009688
//Icons = "https://kittyhacker101.tk/Static/KatBot"
Icons = "https://cdn.discordapp.com/emojis"
Emojis = make(map[string]string)
Channels = make(map[string]string)
//Devices = make(map[string]string)
DeviceMap = make(map[string]deviceStruct)
ActionMap = make(map[string]actionStruct)
)
const APITOKEN = "sometoken"
const DEVICEAPITOKEN = "xyz"
const LISTENIP = "0.0.0.0"
const LISTENPORT = "57000"
const INDEXHTML = "index.html"
type wifiNetwork struct {
RSSI int `json:"rssi"`
SSID string `json:"ssid"`
BSSID string `json:"bssid"`
Channel int `json:"channel"`
Secure int `json:"secure"`
}
type shortStatusResult struct {
Timestamp string `json:"Timestamp"`
Hostname string `json:"Hostname"`
Device string `json:"Device"`
State string `json:"State"`
Maintenancemode string `json:"MaintenanceMode"`
}
type deviceStruct struct {
Name string `json:"Name"`
Hostname string `json:"Hostname"`
Port int `json:"Port"`
Channel string `json:"Channel"`
}
type actionStruct struct {
Name string `json:"Name"`
States []string
}
//var MyDevices []deviceStruct
func init() {
Emojis["laseron"] = "<:laseron:729726642758615151>"
Emojis["laseroff"] = "<:laseroff:730213748102529064>"
Emojis["maintenanceon"] = "<:maintenanceon:729732695009263616>"
Emojis["maintenanceoff"] = "<:maintenanceoff:729828147414958202>"
Emojis["backlighton"] = "<:backlighton:729820542336761856>"
Emojis["backlightoff"] = "<:backlightoff:729820688516644894>"
Emojis["eehtick"] = "<:eehtick:729828147414958202>"
Emojis["overrideon"] = "<:overrideon:730075631198404649>"
Emojis["overrideoff"] = "<:overrideoff:730448103517454376>"
Emojis["3don"] = "<:3don:730213748102529064>"
Emojis["3doff"] = "<3doff:730213748102529064>"
Emojis["userlogin"] = "<:userlogin:730444250839515297>"
Emojis["userlogout"] = "<:userlogout:730444251695153285>"
Channels["general-junk"] = "729631967905054764"
Channels["laser"] = "730836803556343808"
// Devices["laser"] = "192.168.10.135"
/* MyDevices = []deviceStruct{
deviceStruct{
Name: "laser",
IP: "192.168.10.135",
Channel: "729632142358872138", // laser
},
deviceStruct{
Name: "laser2",
IP: "192.168.10.136",
Channel: "729632142358872138", // laser
},
} */
DeviceMap["laser"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.135",
Port: 80,
Channel: "730836803556343808", // laser
}
DeviceMap["laser2"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.136",
Port: 80,
Channel: "730836803556343808", // laser
}
ActionMap["maintenance"] = actionStruct{
Name: "maintenance",
States: []string{"on", "off"},
}
ActionMap["backlight"] = actionStruct{
Name: "backlight",
States: []string{"on", "off"},
}
ActionMap["override"] = actionStruct{
Name: "override",
States: []string{"on", "off"},
}
ActionMap["user"] = actionStruct{
Name: "user",
States: []string{"login", "logout"},
}
/*
var steve string = "laser"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
steve = "laser2"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
*/
for k, v := range DeviceMap {
fmt.Printf("%s Host:%s:%d Channel:%s\n", k, v.Hostname, v.Port, v.Channel)
}
//os.Exit(0)
flag.StringVar(&Token, "t", "", "Bot Token")
flag.Parse()
}
func main() {
// Create a new Discord session using the provided bot token.
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("error creating Discord session,", err)
return
}
// Register the messageCreate func as a callback for MessageCreate events.
dg.AddHandler(messageCreate)
// Open a websocket connection to Discord and begin listening.
err = dg.Open()
if err != nil {
fmt.Println("error opening connection,", err)
return
}
// Wait here until CTRL-C or other term signal is received.
fmt.Println("Bot is now running. Press CTRL-C to exit.")
startWeb(LISTENIP, LISTENPORT, false)
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Cleanly close down the Discord session.
dg.Close()
}
// This function will be called (due to AddHandler above) every time a new
// message is created on any channel that the authenticated bot has access to.
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
if m.Content == "!laser backlight on" {
s.ChannelMessageSend(m.ChannelID, backlight("on"))
}
if m.Content == "!laser backlight off" {
s.ChannelMessageSend(m.ChannelID, backlight("off"))
}
if m.Content == "!laser fullstatus" {
s.ChannelMessageSend(m.ChannelID, fullStatus())
}
if m.Content == "!laser help" {
var printText string
printText += "```\n"
printText += "Available Commands:\n"
printText += "-------------------------------\n"
printText += " laser backlight [on|off]\n"
printText += " laser fullstatus\n"
printText += " laser help\n"
printText += " laser maintenance [on|off]\n"
printText += " laser scanwifi\n"
printText += " laser status\n"
printText += "```\n"
s.ChannelMessageSend(m.ChannelID, printText)
}
if m.Content == "!laser maintenance off" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("disable"))
}
if m.Content == "!laser maintenance on" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("enable"))
}
if m.Content == "!laser scanwifi" {
s.ChannelMessageSend(m.ChannelID, scanWifi())
}
if m.Content == "!laser status" {
s.ChannelMessageSend(m.ChannelID, shortStatus())
}
if m.Content == "!cat" {
tr := &http.Transport{DisableKeepAlives: true}
client := &http.Client{Transport: tr}
resp, err := client.Get("https://images-na.ssl-images-amazon.com/images/I/71FcdrSeKlL._AC_SL1001_.jpg")
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Unable to fetch cat!")
fmt.Println("[Warning] : Cat API Error")
} else {
s.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{Name: "Cat Picture", IconURL: Icons + "/729726642758615151.png"},
Color: Color,
Image: &discordgo.MessageEmbedImage{
URL: resp.Request.URL.String(),
},
Footer: &discordgo.MessageEmbedFooter{Text: "Cat pictures provided by TheCatApi", IconURL: Icons + "/729726642758615151.png"},
})
fmt.Println("[Info] : Cat sent successfully to " + m.Author.Username + "(" + m.Author.ID + ") in " + m.ChannelID)
}
}
}
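// A sketch (illustrative, not wired in): the if-chain in messageCreate above
// can be collapsed into a dispatch table keyed by the literal command text.
// fullStatus, scanWifi and shortStatus already have the right shape; the
// backlight and maintenance entries wrap the parameterised helpers defined
// below. The name laserCommands is hypothetical.
var laserCommands = map[string]func() string{
	"!laser backlight on":    func() string { return backlight("on") },
	"!laser backlight off":   func() string { return backlight("off") },
	"!laser maintenance on":  func() string { return maintenancemode("enable") },
	"!laser maintenance off": func() string { return maintenancemode("disable") },
	"!laser fullstatus":      fullStatus,
	"!laser scanwifi":        scanWifi,
	"!laser status":          shortStatus,
}

// Inside messageCreate this reduces each command to a single lookup:
//
//	if handler, ok := laserCommands[m.Content]; ok {
//		s.ChannelMessageSend(m.ChannelID, handler())
//	}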
func shortStatus() string {
fmt.Println("starting shortStatus")
url := "http://192.168.10.135/status"
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15 | if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
return returnText
}
return "No status available"
}
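// A sketch (the name shortStatusDecoded is illustrative): the same fetch as
// shortStatus above, but decoding straight off the response body with
// json.Decoder instead of ReadAll + Unmarshal, and with every error checked
// before resp is touched. All packages used here are already imported by
// this file.
func shortStatusDecoded() (shortStatusResult, error) {
	var status shortStatusResult
	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Get("http://192.168.10.135/status")
	if err != nil {
		return status, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return status, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	err = json.NewDecoder(resp.Body).Decode(&status)
	return status, err
}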
func fullStatus() string {
fmt.Println("starting fullStatus")
url := fmt.Sprintf("http://192.168.10.135/fullstatus?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
log.Println("Do: ", err)
return "No status available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = "```\n"
/*
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
*/
returnText += string(bodyBytes)
returnText += "```\n"
return returnText
}
return "No status available"
}
func scanWifi() string {
fmt.Println("starting scanWifi")
url := fmt.Sprintf("http://192.168.10.135/scanwifi?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
log.Println("Do: ", err)
return "No networks available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mynetworks []wifiNetwork
err = json.Unmarshal([]byte(bodyBytes), &mynetworks)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
fmt.Println("Number of networks found: ", len(mynetworks))
if len(mynetworks) > 0 {
returnText = "```\n"
}
for i := 0; i < len(mynetworks); i++ {
fmt.Println(mynetworks[i].SSID)
returnText += mynetworks[i].SSID + "\n"
}
if len(mynetworks) > 0 {
returnText += "```"
}
fmt.Println(returnText)
return returnText
}
return "No networks available"
}
func backlightold(mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
if mystate == "on" {
return Emojis["backlighton"] + " **LASER BACKLIGHT ON**"
}
if mystate == "off" {
return Emojis["backlightoff"] + " **LASER BACKLIGHT OFF**"
}
} else {
return "ERROR"
}
return ""
}
//=========
func backlight(mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
client.Do(req)
return ""
}
//=========
func maintenancemode(mystate string) string {
fmt.Println("starting maintenancemode")
url := fmt.Sprintf("http://192.168.10.135/maintenance?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
//resp, err := client.Do(req)
client.Do(req)
/*if resp.StatusCode == http.StatusOK {
if err != nil {
log.Fatal(err)
}
if mystate == "enable" {
return Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
}
if mystate == "disable" {
return Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
}
} else {
return "ERROR"
}*/
return ""
}
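// A sketch of a shared helper (the name deviceGET is illustrative) that the
// backlight/maintenance/status functions above could call instead of each
// repeating the request/timeout boilerplate. Note: this needs "net/url"
// added to the import block at the top of the file.
func deviceGET(path string, params url.Values) ([]byte, error) {
	params.Set("api", DEVICEAPITOKEN)
	endpoint := fmt.Sprintf("http://192.168.10.135/%s?%s", path, params.Encode())
	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return ioutil.ReadAll(resp.Body)
}

// Example call: body, err := deviceGET("backlight", url.Values{"state": {"on"}})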
func printFile(filename string, webprint http.ResponseWriter) {
fmt.Println("Starting printFile")
texttoprint, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Println("ERROR: cannot open ", filename)
if webprint != nil {
http.Error(webprint, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
return // don't fall through and write an empty body on top of the error response
}
if webprint != nil {
fmt.Fprintf(webprint, "%s", string(texttoprint))
} else {
fmt.Print(string(texttoprint))
}
}
func startWeb(listenip string, listenport string, usetls bool) {
r := mux.NewRouter()
r.HandleFunc("/", handlerIndex)
r.HandleFunc("/laser", handlerLaser)
//laserRouter := r.PathPrefix("/laser").Subrouter()
//laserRouter.HandleFunc("/{laser}", handlerLaser)
//laserRouter.Use(loggingMiddleware)
r.HandleFunc("/api", handlerApi)
log.Printf("Starting HTTP Webserver http://%s:%s\n", listenip, listenport)
srv := &http.Server{
Handler: r,
Addr: LISTENIP + ":" + LISTENPORT,
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
err := srv.ListenAndServe()
fmt.Println("cannot start http server:", err)
}
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println("MIDDLEWARE: ", r.RemoteAddr, " ", r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
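// Note: loggingMiddleware above is defined but never attached — the r.Use
// call in startWeb is commented out. With gorilla/mux it is wired up with:
//
//	r.Use(loggingMiddleware) // logs remote address and URI for every request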
func handlerIndex(w http.ResponseWriter, r *http.Request) {
log.Println("Starting handlerIndex")
printFile(INDEXHTML, w)
}
func handlerLaser(webprint http.ResponseWriter, r *http.Request) {
fmt.Println("starting handlerLaser2")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
if APITOKEN != queries.Get("api") {
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API")
return
}
var returnText = ""
switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
fmt.Fprintf(webprint, "%s", returnText)
dg.ChannelMessageSend("729631967905054764", returnText)
}
//====
func checkIfDeviceExists(device string) bool {
// DeviceMap keys are stored lower-case, so a case-insensitive match
// reduces to a single map lookup.
_, ok := DeviceMap[strings.ToLower(device)]
return ok
}
// check if valid action and state
func checkActionAndState(action string, state string) bool {
a, ok := ActionMap[strings.ToLower(action)]
if !ok {
// unknown action
return false
}
for _, s := range a.States {
if strings.EqualFold(s, state) {
// valid state for this action found
return true
}
}
// the action is valid, but the state is not one of its allowed values
return false
}
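// checkActionAndState is pure and map-driven, which makes it easy to pin
// down with a table-driven test. A sketch; it would live in a main_test.go
// in this package (with `import "testing"`), and ActionMap is populated by
// init(), which also runs for tests:
//
//	func TestCheckActionAndState(t *testing.T) {
//		cases := []struct {
//			action, state string
//			want          bool
//		}{
//			{"maintenance", "on", true},
//			{"MAINTENANCE", "ON", true}, // case-insensitive on both sides
//			{"user", "login", true},
//			{"user", "reboot", false}, // valid action, unknown state
//			{"laser", "on", false},    // a device name, not an action
//		}
//		for _, c := range cases {
//			if got := checkActionAndState(c.action, c.state); got != c.want {
//				t.Errorf("checkActionAndState(%q, %q) = %v, want %v", c.action, c.state, got, c.want)
//			}
//		}
//	}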
func handlerApi(webprint http.ResponseWriter, r *http.Request) {
fmt.Println("starting handlerApi")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
// check if api token is valid
if APITOKEN != queries.Get("token") {
fmt.Println("ERROR: Invalid API Token", queries.Get("token"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API Token")
return
}
if !checkIfDeviceExists(queries.Get("device")) {
fmt.Println("ERROR: Invalid device", queries.Get("device"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid Device")
return
}
if !checkActionAndState(queries.Get("action"), queries.Get("state")) {
fmt.Println("ERROR: Bad action or state", queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "%s", "ERROR: Bad action or state")
return
}
fmt.Printf("Device %s is valid\nAction %s is valid\nState %s is valid\n", queries.Get("device"), queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "Device %s is valid\nAction %s is valid\nState %s is valid", queries.Get("device"), queries.Get("action"), queries.Get("state"))
var returnText = ""
/* switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
*/
var lookup = queries.Get("action") + queries.Get("state")
fmt.Println("lookup=", lookup)
returnText = Emojis[lookup] + " **" + strings.ToUpper(queries.Get("device")) + " " + strings.ToUpper(queries.Get("action")) + ":" + strings.ToUpper(queries.Get("state")) + "**"
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
//fmt.Fprintf(webprint, "%s, %s", DeviceMap[queries.Get("device")].Channel, returnText)
dg.ChannelMessageSend(DeviceMap[queries.Get("device")].Channel, returnText)
fmt.Printf("qdevice=%s dchannel=%s\n", queries.Get("device"), DeviceMap[queries.Get("device")].Channel)
//dg.ChannelMessageSend("730836803556343808", returnText)
fmt.Println("returnText = ", returnText)
//dg.ChannelMessageSend("729631967905054764", returnText)
} | resp, err := client.Do(req)
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body) | random_line_split |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
)
var (
Token string
Color = 0x009688
//Icons = "https://kittyhacker101.tk/Static/KatBot"
Icons = "https://cdn.discordapp.com/emojis"
Emojis = make(map[string]string)
Channels = make(map[string]string)
//Devices = make(map[string]string)
DeviceMap = make(map[string]deviceStruct)
ActionMap = make(map[string]actionStruct)
)
const APITOKEN = "sometoken"
const DEVICEAPITOKEN = "xyz"
const LISTENIP = "0.0.0.0"
const LISTENPORT = "57000"
const INDEXHTML = "index.html"
type wifiNetwork struct {
RSSI int `json:"rssi"`
SSID string `json:"ssid"`
BSSID string `json:"bssid"`
Channel int `json:"channel"`
Secure int `json:"secure"`
}
type shortStatusResult struct {
Timestamp string `json:"Timestamp"`
Hostname string `json:"Hostname"`
Device string `json:"Device"`
State string `json:"State"`
Maintenancemode string `json:"MaintenanceMode"`
}
type deviceStruct struct {
Name string `json:"Name"`
Hostname string `json:"Hostname"`
Port int `json:"Port"`
Channel string `json:"Channel"`
}
type actionStruct struct {
Name string `json:"Name"`
States []string
}
//var MyDevices []deviceStruct
func init() {
Emojis["laseron"] = "<:laseron:729726642758615151>"
Emojis["laseroff"] = "<:laseroff:730213748102529064>"
Emojis["maintenanceon"] = "<:maintenanceon:729732695009263616>"
Emojis["maintenanceoff"] = "<:maintenanceoff:729828147414958202>"
Emojis["backlighton"] = "<:backlighton:729820542336761856>"
Emojis["backlightoff"] = "<:backlightoff:729820688516644894>"
Emojis["eehtick"] = "<:eehtick:729828147414958202>"
Emojis["overrideon"] = "<:overrideon:730075631198404649>"
Emojis["overrideoff"] = "<:overrideoff:730448103517454376>"
Emojis["3don"] = "<:3don:730213748102529064>"
Emojis["3doff"] = "<3doff:730213748102529064>"
Emojis["userlogin"] = "<:userlogin:730444250839515297>"
Emojis["userlogout"] = "<:userlogout:730444251695153285>"
Channels["general-junk"] = "729631967905054764"
Channels["laser"] = "730836803556343808"
// Devices["laser"] = "192.168.10.135"
/* MyDevices = []deviceStruct{
deviceStruct{
Name: "laser",
IP: "192.168.10.135",
Channel: "729632142358872138", // laser
},
deviceStruct{
Name: "laser2",
IP: "192.168.10.136",
Channel: "729632142358872138", // laser
},
} */
DeviceMap["laser"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.135",
Port: 80,
Channel: "730836803556343808", // laser
}
DeviceMap["laser2"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.136",
Port: 80,
Channel: "730836803556343808", // laser
}
ActionMap["maintenance"] = actionStruct{
Name: "maintenance",
States: []string{"on", "off"},
}
ActionMap["backlight"] = actionStruct{
Name: "backlight",
States: []string{"on", "off"},
}
ActionMap["override"] = actionStruct{
Name: "override",
States: []string{"on", "off"},
}
ActionMap["user"] = actionStruct{
Name: "user",
States: []string{"login", "logout"},
}
/*
var steve string = "laser"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
steve = "laser2"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
*/
for k, v := range DeviceMap {
fmt.Printf("%s Host:%s:%d Channel:%s\n", k, v.Hostname, v.Port, v.Channel)
}
//os.Exit(0)
flag.StringVar(&Token, "t", "", "Bot Token")
flag.Parse()
}
func main() {
// Create a new Discord session using the provided bot token.
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("error creating Discord session,", err)
return
}
// Register the messageCreate func as a callback for MessageCreate events.
dg.AddHandler(messageCreate)
// Open a websocket connection to Discord and begin listening.
err = dg.Open()
if err != nil {
fmt.Println("error opening connection,", err)
return
}
// Wait here until CTRL-C or other term signal is received.
fmt.Println("Bot is now running. Press CTRL-C to exit.")
startWeb(LISTENIP, LISTENPORT, false)
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Cleanly close down the Discord session.
dg.Close()
}
// This function will be called (due to AddHandler above) every time a new
// message is created on any channel that the authenticated bot has access to.
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
if m.Content == "!laser backlight on" {
s.ChannelMessageSend(m.ChannelID, backlight("on"))
}
if m.Content == "!laser backlight off" {
s.ChannelMessageSend(m.ChannelID, backlight("off"))
}
if m.Content == "!laser fullstatus" {
s.ChannelMessageSend(m.ChannelID, fullStatus())
}
if m.Content == "!laser help" {
var printText string
printText += "```\n"
printText += "Available Commands:\n"
printText += "-------------------------------\n"
printText += " laser backlight [on|off]\n"
printText += " laser fullstatus\n"
printText += " laser help\n"
printText += " laser maintenance [on|off]\n"
printText += " laser scanwifi\n"
printText += " laser status\n"
printText += "```\n"
s.ChannelMessageSend(m.ChannelID, printText)
}
if m.Content == "!laser maintenance off" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("disable"))
}
if m.Content == "!laser maintenance on" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("enable"))
}
if m.Content == "!laser scanwifi" {
s.ChannelMessageSend(m.ChannelID, scanWifi())
}
if m.Content == "!laser status" {
s.ChannelMessageSend(m.ChannelID, shortStatus())
}
if m.Content == "!cat" {
tr := &http.Transport{DisableKeepAlives: true}
client := &http.Client{Transport: tr}
resp, err := client.Get("https://images-na.ssl-images-amazon.com/images/I/71FcdrSeKlL._AC_SL1001_.jpg")
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Unable to fetch cat!")
fmt.Println("[Warning] : Cat API Error")
} else {
s.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{Name: "Cat Picture", IconURL: Icons + "/729726642758615151.png"},
Color: Color,
Image: &discordgo.MessageEmbedImage{
URL: resp.Request.URL.String(),
},
Footer: &discordgo.MessageEmbedFooter{Text: "Cat pictures provided by TheCatApi", IconURL: Icons + "/729726642758615151.png"},
})
fmt.Println("[Info] : Cat sent successfully to " + m.Author.Username + "(" + m.Author.ID + ") in " + m.ChannelID)
}
}
}
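// Editor's sketch (not part of the original bot): the exact-match if-chain in
// messageCreate grows linearly with each new command. A table-driven dispatcher
// built from the same package helpers keeps the handler short; no new
// dependencies are assumed.
func dispatchCommand(s *discordgo.Session, m *discordgo.MessageCreate) {
	commands := map[string]func() string{
		"!laser backlight on":    func() string { return backlight("on") },
		"!laser backlight off":   func() string { return backlight("off") },
		"!laser fullstatus":      fullStatus,
		"!laser status":          shortStatus,
		"!laser scanwifi":        scanWifi,
		"!laser maintenance on":  func() string { return maintenancemode("enable") },
		"!laser maintenance off": func() string { return maintenancemode("disable") },
	}
	if reply, ok := commands[m.Content]; ok {
		s.ChannelMessageSend(m.ChannelID, reply())
	}
}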
func shortStatus() string {
fmt.Println("starting shortStatus")
url := fmt.Sprintf("http://192.168.10.135/status")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No status available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
return returnText
}
return "No status available"
}
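// Editor's sketch: shortStatus/fullStatus/scanWifi hardcode 192.168.10.135
// even though init() registers hosts and ports in DeviceMap. Deriving the
// URL from the map would let these helpers serve every registered device;
// the "laser" key used here is the one init() already defines.
func statusURL(device string) (string, bool) {
	dev, ok := DeviceMap[device]
	if !ok {
		return "", false
	}
	return fmt.Sprintf("http://%s:%d/status", dev.Hostname, dev.Port), true
}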
func fullStatus() string {
fmt.Println("starting fullStatus")
url := fmt.Sprintf("http://192.168.10.135/fullstatus?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No status available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = "```\n"
/*
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
*/
returnText += string(bodyBytes)
returnText += "```\n"
return returnText
}
return "No status available"
}
func scanWifi() string {
fmt.Println("starting scanWifi")
url := fmt.Sprintf("http://192.168.10.135/scanwifi?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No networks available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mynetworks []wifiNetwork
err = json.Unmarshal([]byte(bodyBytes), &mynetworks)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
fmt.Println("Number of networks found: ", len(mynetworks))
if len(mynetworks) > 0 {
returnText = "```\n"
}
for i := 0; i < len(mynetworks); i++ {
fmt.Println(mynetworks[i].SSID)
returnText += mynetworks[i].SSID + "\n"
}
if len(mynetworks) > 0 {
returnText += "```"
}
fmt.Println(returnText)
return returnText
}
return "No networks available"
}
func backlightold(mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	log.Println("request error:", err)
	return "ERROR"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
if mystate == "on" {
return Emojis["backlighton"] + " **LASER BACKLIGHT ON**"
}
if mystate == "off" {
return Emojis["backlightoff"] + " **LASER BACKLIGHT OFF**"
}
} else {
return "ERROR"
}
return ""
}
//=========
func backlight(mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
if resp, err := client.Do(req); err != nil {
	log.Println("request error:", err)
} else {
	resp.Body.Close()
}
return ""
}
//=========
func maintenancemode(mystate string) string {
fmt.Println("starting maintenancemode")
url := fmt.Sprintf("http://192.168.10.135/maintenance?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
if resp, err := client.Do(req); err != nil {
	log.Println("request error:", err)
} else {
	resp.Body.Close()
}
/*if resp.StatusCode == http.StatusOK {
if err != nil {
log.Fatal(err)
}
if mystate == "enable" {
return Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
}
if mystate == "disable" {
return Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
}
} else {
return "ERROR"
}*/
return ""
}
func printFile(filename string, webprint http.ResponseWriter) {
fmt.Println("Starting printFile")
texttoprint, err := ioutil.ReadFile(filename)
if err != nil {
	fmt.Println("ERROR: cannot open ", filename)
	if webprint != nil {
		http.Error(webprint, http.StatusText(http.StatusNotFound), http.StatusNotFound)
	}
	return // do not fall through and write an empty body after the 404
}
if webprint != nil {
fmt.Fprintf(webprint, "%s", string(texttoprint))
} else {
fmt.Print(string(texttoprint))
}
}
func startWeb(listenip string, listenport string, usetls bool) {
r := mux.NewRouter()
r.HandleFunc("/", handlerIndex)
r.HandleFunc("/laser", handlerLaser)
//laserRouter := r.PathPrefix("/laser").Subrouter()
//laserRouter.HandleFunc("/{laser}", handlerLaser)
//laserRouter.Use(loggingMiddleware)
r.HandleFunc("/api", handlerApi)
log.Printf("Starting HTTP Webserver http://%s:%s\n", listenip, listenport)
srv := &http.Server{
Handler: r,
Addr: listenip + ":" + listenport,
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
err := srv.ListenAndServe()
fmt.Println("cannot start http server:", err)
}
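// Editor's note: the usetls parameter of startWeb is accepted but never used.
// A minimal sketch of honoring it is below; "cert.pem"/"key.pem" are
// placeholder paths, not files that exist in this project:
//
//	if usetls {
//		err = srv.ListenAndServeTLS("cert.pem", "key.pem")
//	} else {
//		err = srv.ListenAndServe()
//	}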
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println("MIDDLEWARE: ", r.RemoteAddr, " ", r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
func handlerIndex(w http.ResponseWriter, r *http.Request) {
log.Println("Starting handlerIndex")
printFile(INDEXHTML, w)
}
func handlerLaser(webprint http.ResponseWriter, r *http.Request) |
//====
func checkIfDeviceExists(device string) bool {
// get all the device names and store in an array
i := 0
devicearray := make([]string, len(DeviceMap))
for k := range DeviceMap {
devicearray[i] = k
i++
}
for _, a := range devicearray {
if strings.ToLower(a) == strings.ToLower(device) {
return true
}
}
return false
}
// check if valid action and state
func checkActionAndState(action string, state string) bool {
// get all the device names and store in an array
i := 0
actionarray := make([]string, len(ActionMap))
for k := range ActionMap {
actionarray[i] = k
i++
}
//fmt.Printf("array=%v\n", actionarray)
for _, a := range actionarray {
//fmt.Println("a=", a)
if strings.ToLower(a) == strings.ToLower(action) {
//fmt.Println("Valid Action:", action)
//fmt.Println("strings.ToLower(a)=", strings.ToLower(a), " strings.ToLower(action)=", strings.ToLower(action))
for _, s := range ActionMap[a].States {
//fmt.Println("s=", s)
//fmt.Printf("v=%v\n", s)
if strings.ToLower(s) == strings.ToLower(state) {
// valid state for this action found
return true
}
}
}
}
// if reached here, state or action is bad and thus return false
return false
}
func handlerApi(webprint http.ResponseWriter, r *http.Request) {
fmt.Println("starting handlerLaser2")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
// check if api token is valid
if APITOKEN != queries.Get("token") {
fmt.Println("ERROR: Invalid API Token", queries.Get("token"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API Token")
return
}
if !checkIfDeviceExists(queries.Get("device")) {
fmt.Println("ERROR: Invalid device", queries.Get("device"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid Device")
return
}
if !checkActionAndState(queries.Get("action"), queries.Get("state")) {
fmt.Println("ERROR: Bad action or state", queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "%s", "ERROR: Bad action or state")
return
}
fmt.Printf("Device %s is valid\nAction %s is valid\nState %s is valid\n", queries.Get("device"), queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "Device %s is valid\nAction %s is valid\nState %s is valid", queries.Get("device"), queries.Get("action"), queries.Get("state"))
var returnText = ""
/* switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
*/
var lookup = queries.Get("action") + queries.Get("state")
fmt.Println("lookup=", lookup)
returnText = Emojis[lookup] + " **" + strings.ToUpper(queries.Get("device")) + " " + strings.ToUpper(queries.Get("action")) + ":" + strings.ToUpper(queries.Get("state")) + "**"
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
//fmt.Fprintf(webprint, "%s, %s", DeviceMap[queries.Get("device")].Channel, returnText)
dg.ChannelMessageSend(DeviceMap[queries.Get("device")].Channel, returnText)
fmt.Printf("qdevice=%s dchannel=%s\n", queries.Get("device"), DeviceMap[queries.Get("device")].Channel)
//dg.ChannelMessageSend("730836803556343808", returnText)
fmt.Println("returnText = ", returnText)
//dg.ChannelMessageSend("729631967905054764", returnText)
}
| {
fmt.Println("starting handlerLaser2")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
if APITOKEN != queries.Get("api") {
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API")
return
}
var returnText = ""
switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
fmt.Fprintf(webprint, "%s", returnText)
dg.ChannelMessageSend("729631967905054764", returnText)
} | identifier_body |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
)
var (
Token string
Color = 0x009688
//Icons = "https://kittyhacker101.tk/Static/KatBot"
Icons = "https://cdn.discordapp.com/emojis"
Emojis = make(map[string]string)
Channels = make(map[string]string)
//Devices = make(map[string]string)
DeviceMap = make(map[string]deviceStruct)
ActionMap = make(map[string]actionStruct)
)
const APITOKEN = "sometoken"
const DEVICEAPITOKEN = "xyz"
const LISTENIP = "0.0.0.0"
const LISTENPORT = "57000"
const INDEXHTML = "index.html"
type wifiNetwork struct {
RSSI int `json:"rssi"`
SSID string `json:"ssid"`
BSSID string `json:"bssid"`
Channel int `json:"channel"`
Secure int `json:"secure"`
}
type shortStatusResult struct {
Timestamp string `json:"Timestamp"`
Hostname string `json:"Hostname"`
Device string `json:"Device"`
State string `json:"State"`
Maintenancemode string `json:"MaintenanceMode"`
}
type deviceStruct struct {
Name string `json:"Name"`
Hostname string `json:"Hostname"`
Port int `json:"Port"`
Channel string `json:"Channel"`
}
type actionStruct struct {
Name string `json:"Name"`
States []string
}
//var MyDevices []deviceStruct
func init() {
Emojis["laseron"] = "<:laseron:729726642758615151>"
Emojis["laseroff"] = "<:laseroff:730213748102529064>"
Emojis["maintenanceon"] = "<:maintenanceon:729732695009263616>"
Emojis["maintenanceoff"] = "<:maintenanceoff:729828147414958202>"
Emojis["backlighton"] = "<:backlighton:729820542336761856>"
Emojis["backlightoff"] = "<:backlightoff:729820688516644894>"
Emojis["eehtick"] = "<:eehtick:729828147414958202>"
Emojis["overrideon"] = "<:overrideon:730075631198404649>"
Emojis["overrideoff"] = "<:overrideoff:730448103517454376>"
Emojis["3don"] = "<:3don:730213748102529064>"
Emojis["3doff"] = "<3doff:730213748102529064>"
Emojis["userlogin"] = "<:userlogin:730444250839515297>"
Emojis["userlogout"] = "<:userlogout:730444251695153285>"
Channels["general-junk"] = "729631967905054764"
Channels["laser"] = "730836803556343808"
// Devices["laser"] = "192.168.10.135"
/* MyDevices = []deviceStruct{
deviceStruct{
Name: "laser",
IP: "192.168.10.135",
Channel: "729632142358872138", // laser
},
deviceStruct{
Name: "laser2",
IP: "192.168.10.136",
Channel: "729632142358872138", // laser
},
} */
DeviceMap["laser"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.135",
Port: 80,
Channel: "730836803556343808", // laser
}
DeviceMap["laser2"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.136",
Port: 80,
Channel: "730836803556343808", // laser
}
ActionMap["maintenance"] = actionStruct{
Name: "maintenance",
States: []string{"on", "off"},
}
ActionMap["backlight"] = actionStruct{
Name: "backlight",
States: []string{"on", "off"},
}
ActionMap["override"] = actionStruct{
Name: "override",
States: []string{"on", "off"},
}
ActionMap["user"] = actionStruct{
Name: "user",
States: []string{"login", "logout"},
}
/*
var steve string = "laser"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
steve = "laser2"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
*/
for k, v := range DeviceMap {
fmt.Printf("%s Host:%s:%d Channel:%s\n", k, v.Hostname, v.Port, v.Channel)
}
//os.Exit(0)
flag.StringVar(&Token, "t", "", "Bot Token")
flag.Parse()
}
func main() {
// Create a new Discord session using the provided bot token.
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("error creating Discord session,", err)
return
}
// Register the messageCreate func as a callback for MessageCreate events.
dg.AddHandler(messageCreate)
// Open a websocket connection to Discord and begin listening.
err = dg.Open()
if err != nil {
fmt.Println("error opening connection,", err)
return
}
// Wait here until CTRL-C or other term signal is received.
fmt.Println("Bot is now running. Press CTRL-C to exit.")
go startWeb(LISTENIP, LISTENPORT, false) // run in the background; ListenAndServe blocks, so the signal wait below would otherwise never be reached
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM) // os.Kill cannot be caught, so registering it has no effect
<-sc
// Cleanly close down the Discord session.
dg.Close()
}
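// Editor's sketch: dg.Close() shuts the Discord session down, but the web
// server is simply abandoned on exit. If startWeb returned its *http.Server
// (hypothetical; the current version does not), main could drain it too:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	_ = srv.Shutdown(ctx)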
// This function will be called (due to AddHandler above) every time a new
// message is created on any channel that the authenticated bot has access to.
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID |
if m.Content == "!laser backlight on" {
s.ChannelMessageSend(m.ChannelID, backlight("on"))
}
if m.Content == "!laser backlight off" {
s.ChannelMessageSend(m.ChannelID, backlight("off"))
}
if m.Content == "!laser fullstatus" {
s.ChannelMessageSend(m.ChannelID, fullStatus())
}
if m.Content == "!laser help" {
var printText string
printText += "```\n"
printText += "Available Commands:\n"
printText += "-------------------------------\n"
printText += " laser backlight [on|off]\n"
printText += " laser fullstatus\n"
printText += " laser help\n"
printText += " laser maintenance [on|off]\n"
printText += " laser scanwifi\n"
printText += " laser status\n"
printText += "```\n"
s.ChannelMessageSend(m.ChannelID, printText)
}
if m.Content == "!laser maintenance off" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("disable"))
}
if m.Content == "!laser maintenance on" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("enable"))
}
if m.Content == "!laser scanwifi" {
s.ChannelMessageSend(m.ChannelID, scanWifi())
}
if m.Content == "!laser status" {
s.ChannelMessageSend(m.ChannelID, shortStatus())
}
if m.Content == "!cat" {
tr := &http.Transport{DisableKeepAlives: true}
client := &http.Client{Transport: tr}
resp, err := client.Get("https://images-na.ssl-images-amazon.com/images/I/71FcdrSeKlL._AC_SL1001_.jpg")
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Unable to fetch cat!")
fmt.Println("[Warning] : Cat API Error")
} else {
s.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{Name: "Cat Picture", IconURL: Icons + "/729726642758615151.png"},
Color: Color,
Image: &discordgo.MessageEmbedImage{
URL: resp.Request.URL.String(),
},
Footer: &discordgo.MessageEmbedFooter{Text: "Cat pictures provided by TheCatApi", IconURL: Icons + "/729726642758615151.png"},
})
fmt.Println("[Info] : Cat sent successfully to " + m.Author.Username + "(" + m.Author.ID + ") in " + m.ChannelID)
}
}
}
func shortStatus() string {
fmt.Println("starting shortStatus")
url := fmt.Sprintf("http://192.168.10.135/status")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No status available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
return returnText
}
return "No status available"
}
func fullStatus() string {
fmt.Println("starting fullStatus")
url := fmt.Sprintf("http://192.168.10.135/fullstatus?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No status available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = "```\n"
/*
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
*/
returnText += string(bodyBytes)
returnText += "```\n"
return returnText
}
return "No status available"
}
func scanWifi() string {
fmt.Println("starting scanWifi")
url := fmt.Sprintf("http://192.168.10.135/scanwifi?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No networks available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mynetworks []wifiNetwork
err = json.Unmarshal([]byte(bodyBytes), &mynetworks)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
fmt.Println("Number of networks found: ", len(mynetworks))
if len(mynetworks) > 0 {
returnText = "```\n"
}
for i := 0; i < len(mynetworks); i++ {
fmt.Println(mynetworks[i].SSID)
returnText += mynetworks[i].SSID + "\n"
}
if len(mynetworks) > 0 {
returnText += "```"
}
fmt.Println(returnText)
return returnText
}
return "No networks available"
}
func backlightold(mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	log.Println("request error:", err)
	return "ERROR"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
if mystate == "on" {
return Emojis["backlighton"] + " **LASER BACKLIGHT ON**"
}
if mystate == "off" {
return Emojis["backlightoff"] + " **LASER BACKLIGHT OFF**"
}
} else {
return "ERROR"
}
return ""
}
//=========
func backlight(mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
if resp, err := client.Do(req); err != nil {
	log.Println("request error:", err)
} else {
	resp.Body.Close()
}
return ""
}
//=========
func maintenancemode(mystate string) string {
fmt.Println("starting maintenancemode")
url := fmt.Sprintf("http://192.168.10.135/maintenance?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
if resp, err := client.Do(req); err != nil {
	log.Println("request error:", err)
} else {
	resp.Body.Close()
}
/*if resp.StatusCode == http.StatusOK {
if err != nil {
log.Fatal(err)
}
if mystate == "enable" {
return Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
}
if mystate == "disable" {
return Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
}
} else {
return "ERROR"
}*/
return ""
}
func printFile(filename string, webprint http.ResponseWriter) {
fmt.Println("Starting printFile")
texttoprint, err := ioutil.ReadFile(filename)
if err != nil {
	fmt.Println("ERROR: cannot open ", filename)
	if webprint != nil {
		http.Error(webprint, http.StatusText(http.StatusNotFound), http.StatusNotFound)
	}
	return // do not fall through and write an empty body after the 404
}
if webprint != nil {
fmt.Fprintf(webprint, "%s", string(texttoprint))
} else {
fmt.Print(string(texttoprint))
}
}
func startWeb(listenip string, listenport string, usetls bool) {
r := mux.NewRouter()
r.HandleFunc("/", handlerIndex)
r.HandleFunc("/laser", handlerLaser)
//laserRouter := r.PathPrefix("/laser").Subrouter()
//laserRouter.HandleFunc("/{laser}", handlerLaser)
//laserRouter.Use(loggingMiddleware)
r.HandleFunc("/api", handlerApi)
log.Printf("Starting HTTP Webserver http://%s:%s\n", listenip, listenport)
srv := &http.Server{
Handler: r,
Addr: listenip + ":" + listenport,
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
err := srv.ListenAndServe()
fmt.Println("cannot start http server:", err)
}
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println("MIDDLEWARE: ", r.RemoteAddr, " ", r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
func handlerIndex(w http.ResponseWriter, r *http.Request) {
log.Println("Starting handlerIndex")
printFile(INDEXHTML, w)
}
func handlerLaser(webprint http.ResponseWriter, r *http.Request) {
fmt.Println("starting handlerLaser2")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
if APITOKEN != queries.Get("api") {
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API")
return
}
var returnText = ""
switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
fmt.Fprintf(webprint, "%s", returnText)
dg.ChannelMessageSend("729631967905054764", returnText)
}
//====
func checkIfDeviceExists(device string) bool {
// get all the device names and store in an array
i := 0
devicearray := make([]string, len(DeviceMap))
for k := range DeviceMap {
devicearray[i] = k
i++
}
for _, a := range devicearray {
if strings.ToLower(a) == strings.ToLower(device) {
return true
}
}
return false
}
// check if valid action and state
func checkActionAndState(action string, state string) bool {
// get all the device names and store in an array
i := 0
actionarray := make([]string, len(ActionMap))
for k := range ActionMap {
actionarray[i] = k
i++
}
//fmt.Printf("array=%v\n", actionarray)
for _, a := range actionarray {
//fmt.Println("a=", a)
if strings.ToLower(a) == strings.ToLower(action) {
//fmt.Println("Valid Action:", action)
//fmt.Println("strings.ToLower(a)=", strings.ToLower(a), " strings.ToLower(action)=", strings.ToLower(action))
for _, s := range ActionMap[a].States {
//fmt.Println("s=", s)
//fmt.Printf("v=%v\n", s)
if strings.ToLower(s) == strings.ToLower(state) {
// valid state for this action found
return true
}
}
}
}
// if reached here, state or action is bad and thus return false
return false
}
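// Editor's sketch: checkIfDeviceExists and checkActionAndState copy the map
// keys into a slice before scanning. Direct map indexing does the same job;
// note this variant is case-sensitive, unlike the originals, so it is an
// illustration rather than a drop-in replacement.
func checkActionAndStateDirect(action, state string) bool {
	a, ok := ActionMap[action]
	if !ok {
		return false
	}
	for _, s := range a.States {
		if s == state {
			return true
		}
	}
	return false
}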
func handlerApi(webprint http.ResponseWriter, r *http.Request) {
fmt.Println("starting handlerLaser2")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
// check if api token is valid
if APITOKEN != queries.Get("token") {
fmt.Println("ERROR: Invalid API Token", queries.Get("token"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API Token")
return
}
if !checkIfDeviceExists(queries.Get("device")) {
fmt.Println("ERROR: Invalid device", queries.Get("device"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid Device")
return
}
if !checkActionAndState(queries.Get("action"), queries.Get("state")) {
fmt.Println("ERROR: Bad action or state", queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "%s", "ERROR: Bad action or state")
return
}
fmt.Printf("Device %s is valid\nAction %s is valid\nState %s is valid\n", queries.Get("device"), queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "Device %s is valid\nAction %s is valid\nState %s is valid", queries.Get("device"), queries.Get("action"), queries.Get("state"))
var returnText = ""
/* switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
*/
var lookup = queries.Get("action") + queries.Get("state")
fmt.Println("lookup=", lookup)
returnText = Emojis[lookup] + " **" + strings.ToUpper(queries.Get("device")) + " " + strings.ToUpper(queries.Get("action")) + ":" + strings.ToUpper(queries.Get("state")) + "**"
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
//fmt.Fprintf(webprint, "%s, %s", DeviceMap[queries.Get("device")].Channel, returnText)
dg.ChannelMessageSend(DeviceMap[queries.Get("device")].Channel, returnText)
fmt.Printf("qdevice=%s dchannel=%s\n", queries.Get("device"), DeviceMap[queries.Get("device")].Channel)
//dg.ChannelMessageSend("730836803556343808", returnText)
fmt.Println("returnText = ", returnText)
//dg.ChannelMessageSend("729631967905054764", returnText)
}
| {
return
} | conditional_block |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
)
var (
Token string
Color = 0x009688
//Icons = "https://kittyhacker101.tk/Static/KatBot"
Icons = "https://cdn.discordapp.com/emojis"
Emojis = make(map[string]string)
Channels = make(map[string]string)
//Devices = make(map[string]string)
DeviceMap = make(map[string]deviceStruct)
ActionMap = make(map[string]actionStruct)
)
const APITOKEN = "sometoken"
const DEVICEAPITOKEN = "xyz"
const LISTENIP = "0.0.0.0"
const LISTENPORT = "57000"
const INDEXHTML = "index.html"
type wifiNetwork struct {
RSSI int `json:"rssi"`
SSID string `json:"ssid"`
BSSID string `json:"bssid"`
Channel int `json:"channel"`
Secure int `json:"secure"`
}
type shortStatusResult struct {
Timestamp string `json:"Timestamp"`
Hostname string `json:"Hostname"`
Device string `json:"Device"`
State string `json:"State"`
Maintenancemode string `json:"MaintenanceMode"`
}
type deviceStruct struct {
Name string `json:"Name"`
Hostname string `json:"Hostname"`
Port int `json:"Port"`
Channel string `json:"Channel"`
}
type actionStruct struct {
Name string `json:"Name"`
States []string
}
//var MyDevices []deviceStruct
func init() {
Emojis["laseron"] = "<:laseron:729726642758615151>"
Emojis["laseroff"] = "<:laseroff:730213748102529064>"
Emojis["maintenanceon"] = "<:maintenanceon:729732695009263616>"
Emojis["maintenanceoff"] = "<:maintenanceoff:729828147414958202>"
Emojis["backlighton"] = "<:backlighton:729820542336761856>"
Emojis["backlightoff"] = "<:backlightoff:729820688516644894>"
Emojis["eehtick"] = "<:eehtick:729828147414958202>"
Emojis["overrideon"] = "<:overrideon:730075631198404649>"
Emojis["overrideoff"] = "<:overrideoff:730448103517454376>"
Emojis["3don"] = "<:3don:730213748102529064>"
Emojis["3doff"] = "<3doff:730213748102529064>"
Emojis["userlogin"] = "<:userlogin:730444250839515297>"
Emojis["userlogout"] = "<:userlogout:730444251695153285>"
Channels["general-junk"] = "729631967905054764"
Channels["laser"] = "730836803556343808"
// Devices["laser"] = "192.168.10.135"
/* MyDevices = []deviceStruct{
deviceStruct{
Name: "laser",
IP: "192.168.10.135",
Channel: "729632142358872138", // laser
},
deviceStruct{
Name: "laser2",
IP: "192.168.10.136",
Channel: "729632142358872138", // laser
},
} */
DeviceMap["laser"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.135",
Port: 80,
Channel: "730836803556343808", // laser
}
DeviceMap["laser2"] = deviceStruct{
Name: "laser",
Hostname: "192.168.10.136",
Port: 80,
Channel: "730836803556343808", // laser
}
ActionMap["maintenance"] = actionStruct{
Name: "maintenance",
States: []string{"on", "off"},
}
ActionMap["backlight"] = actionStruct{
Name: "backlight",
States: []string{"on", "off"},
}
ActionMap["override"] = actionStruct{
Name: "override",
States: []string{"on", "off"},
}
ActionMap["user"] = actionStruct{
Name: "user",
States: []string{"login", "logout"},
}
/*
var steve string = "laser"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
steve = "laser2"
fmt.Println("Name:", DeviceMap[steve].Name, " Host:", DeviceMap[steve].Hostname, ":", DeviceMap[steve].Port, " Channel:", DeviceMap[steve].Channel)
*/
for k, v := range DeviceMap {
fmt.Printf("%s Host:%s:%d Channel:%s\n", k, v.Hostname, v.Port, v.Channel)
}
//os.Exit(0)
flag.StringVar(&Token, "t", "", "Bot Token")
flag.Parse()
}
func main() {
// Create a new Discord session using the provided bot token.
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("error creating Discord session,", err)
return
}
// Register the messageCreate func as a callback for MessageCreate events.
dg.AddHandler(messageCreate)
// Open a websocket connection to Discord and begin listening.
err = dg.Open()
if err != nil {
fmt.Println("error opening connection,", err)
return
}
// Wait here until CTRL-C or other term signal is received.
fmt.Println("Bot is now running. Press CTRL-C to exit.")
go startWeb(LISTENIP, LISTENPORT, false) // run in the background; ListenAndServe blocks, so the signal wait below would otherwise never be reached
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM) // os.Kill cannot be caught, so registering it has no effect
<-sc
// Cleanly close down the Discord session.
dg.Close()
}
// This function will be called (due to AddHandler above) every time a new
// message is created on any channel that the authenticated bot has access to.
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
if m.Content == "!laser backlight on" {
s.ChannelMessageSend(m.ChannelID, backlight("on"))
}
if m.Content == "!laser backlight off" {
s.ChannelMessageSend(m.ChannelID, backlight("off"))
}
if m.Content == "!laser fullstatus" {
s.ChannelMessageSend(m.ChannelID, fullStatus())
}
if m.Content == "!laser help" {
var printText string
printText += "```\n"
printText += "Available Commands:\n"
printText += "-------------------------------\n"
printText += " laser backlight [on|off]\n"
printText += " laser fullstatus\n"
printText += " laser help\n"
printText += " laser maintenance [on|off]\n"
printText += " laser scanwifi\n"
printText += " laser status\n"
printText += "```\n"
s.ChannelMessageSend(m.ChannelID, printText)
}
if m.Content == "!laser maintenance off" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("disable"))
}
if m.Content == "!laser maintenance on" {
s.ChannelMessageSend(m.ChannelID, maintenancemode("enable"))
}
if m.Content == "!laser scanwifi" {
s.ChannelMessageSend(m.ChannelID, scanWifi())
}
if m.Content == "!laser status" {
s.ChannelMessageSend(m.ChannelID, shortStatus())
}
if m.Content == "!cat" {
tr := &http.Transport{DisableKeepAlives: true}
client := &http.Client{Transport: tr}
resp, err := client.Get("https://images-na.ssl-images-amazon.com/images/I/71FcdrSeKlL._AC_SL1001_.jpg")
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
s.ChannelMessageSend(m.ChannelID, "Unable to fetch cat!")
fmt.Println("[Warning] : Cat API Error")
} else {
s.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{
Author: &discordgo.MessageEmbedAuthor{Name: "Cat Picture", IconURL: Icons + "/729726642758615151.png"},
Color: Color,
Image: &discordgo.MessageEmbedImage{
URL: resp.Request.URL.String(),
},
Footer: &discordgo.MessageEmbedFooter{Text: "Cat pictures provided by TheCatApi", IconURL: Icons + "/729726642758615151.png"},
})
fmt.Println("[Info] : Cat sent successfully to " + m.Author.Username + "(" + m.Author.ID + ") in " + m.ChannelID)
}
}
}
func shortStatus() string {
fmt.Println("starting shortStatus")
url := fmt.Sprintf("http://192.168.10.135/status")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No status available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
return returnText
}
return "No status available"
}
func fullStatus() string {
fmt.Println("starting fullStatus")
url := fmt.Sprintf("http://192.168.10.135/fullstatus?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No status available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mystatus shortStatusResult
err = json.Unmarshal([]byte(bodyBytes), &mystatus)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = "```\n"
/*
if mystatus.State == "on" {
returnText = Emojis["laseron"] + " **" + strings.ToUpper(mystatus.Device) + " IN USE**"
} else {
returnText = "**" + strings.ToUpper(mystatus.Device) + " IS FREE**"
}
if mystatus.Maintenancemode == "enabled" {
returnText = Emojis["maintenance"] + " **" + strings.ToUpper(mystatus.Device) + " IN MAINTENANCE MODE**"
}
*/
returnText += string(bodyBytes)
returnText += "```\n"
return returnText
}
return "No status available"
}
func scanWifi() string {
fmt.Println("starting scanWifi")
url := fmt.Sprintf("http://192.168.10.135/scanwifi?api=%s", DEVICEAPITOKEN)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return ""
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	fmt.Println("request error:", err)
	return "No networks available"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var mynetworks []wifiNetwork
err = json.Unmarshal([]byte(bodyBytes), &mynetworks)
if err != nil {
fmt.Println("unmarshal error: ")
fmt.Println(err)
}
var returnText = ""
fmt.Println("Number of networks found: ", len(mynetworks))
if len(mynetworks) > 0 {
returnText = "```\n"
}
for i := 0; i < len(mynetworks); i++ {
fmt.Println(mynetworks[i].SSID)
returnText += mynetworks[i].SSID + "\n"
}
if len(mynetworks) > 0 {
returnText += "```"
}
fmt.Println(returnText)
return returnText
}
return "No networks available"
}
func | (mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
resp, err := client.Do(req)
if err != nil {
	log.Println("request error:", err)
	return "ERROR"
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
if mystate == "on" {
return Emojis["backlighton"] + " **LASER BACKLIGHT ON**"
}
if mystate == "off" {
return Emojis["backlightoff"] + " **LASER BACKLIGHT OFF**"
}
} else {
return "ERROR"
}
return ""
}
//=========
func backlight(mystate string) string {
fmt.Println("starting backlight")
url := fmt.Sprintf("http://192.168.10.135/backlight?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
if resp, err := client.Do(req); err != nil {
	log.Println("request error:", err)
} else {
	resp.Body.Close()
}
return ""
}
//=========
func maintenancemode(mystate string) string {
fmt.Println("starting maintenancemode")
url := fmt.Sprintf("http://192.168.10.135/maintenance?api=%s&state=%s", DEVICEAPITOKEN, mystate)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatal("NewRequest: ", err)
return "ERROR"
}
client := &http.Client{}
client.Timeout = time.Second * 15
if resp, err := client.Do(req); err != nil {
	log.Println("request error:", err)
} else {
	resp.Body.Close()
}
/*if resp.StatusCode == http.StatusOK {
if err != nil {
log.Fatal(err)
}
if mystate == "enable" {
return Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
}
if mystate == "disable" {
return Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
}
} else {
return "ERROR"
}*/
return ""
}
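// Editor's sketch: backlight and maintenancemode differ only in the endpoint
// path. One shared helper, using the same device endpoints, would remove the
// duplication (sketch only; error handling kept minimal):
func deviceGet(path, state string) error {
	u := fmt.Sprintf("http://192.168.10.135/%s?api=%s&state=%s", path, DEVICEAPITOKEN, state)
	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Get(u)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}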
func printFile(filename string, webprint http.ResponseWriter) {
fmt.Println("Starting printFile")
texttoprint, err := ioutil.ReadFile(filename)
if err != nil {
	fmt.Println("ERROR: cannot open ", filename)
	if webprint != nil {
		http.Error(webprint, http.StatusText(http.StatusNotFound), http.StatusNotFound)
	}
	return // do not fall through and write an empty body after the 404
}
if webprint != nil {
fmt.Fprintf(webprint, "%s", string(texttoprint))
} else {
fmt.Print(string(texttoprint))
}
}
func startWeb(listenip string, listenport string, usetls bool) {
r := mux.NewRouter()
r.HandleFunc("/", handlerIndex)
r.HandleFunc("/laser", handlerLaser)
//laserRouter := r.PathPrefix("/laser").Subrouter()
//laserRouter.HandleFunc("/{laser}", handlerLaser)
//laserRouter.Use(loggingMiddleware)
r.HandleFunc("/api", handlerApi)
log.Printf("Starting HTTP Webserver http://%s:%s\n", listenip, listenport)
srv := &http.Server{
Handler: r,
Addr: listenip + ":" + listenport,
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
err := srv.ListenAndServe()
fmt.Println("cannot start http server:", err)
}
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println("MIDDLEWARE: ", r.RemoteAddr, " ", r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
func handlerIndex(w http.ResponseWriter, r *http.Request) {
log.Println("Starting handlerIndex")
printFile(INDEXHTML, w)
}
func handlerLaser(webprint http.ResponseWriter, r *http.Request) {
fmt.Println("starting handlerLaser2")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
if APITOKEN != queries.Get("api") {
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API")
return
}
var returnText = ""
switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
fmt.Fprintf(webprint, "%s", returnText)
dg.ChannelMessageSend("729631967905054764", returnText)
}
//====
func checkIfDeviceExists(device string) bool {
// get all the device names and store in an array
i := 0
devicearray := make([]string, len(DeviceMap))
for k := range DeviceMap {
devicearray[i] = k
i++
}
for _, a := range devicearray {
if strings.ToLower(a) == strings.ToLower(device) {
return true
}
}
return false
}
// check if valid action and state
func checkActionAndState(action string, state string) bool {
// get all the device names and store in an array
i := 0
actionarray := make([]string, len(ActionMap))
for k := range ActionMap {
actionarray[i] = k
i++
}
//fmt.Printf("array=%v\n", actionarray)
for _, a := range actionarray {
//fmt.Println("a=", a)
if strings.ToLower(a) == strings.ToLower(action) {
//fmt.Println("Valid Action:", action)
//fmt.Println("strings.ToLower(a)=", strings.ToLower(a), " strings.ToLower(action)=", strings.ToLower(action))
for _, s := range ActionMap[a].States {
//fmt.Println("s=", s)
//fmt.Printf("v=%v\n", s)
if strings.ToLower(s) == strings.ToLower(state) {
// valid state for this action found
return true
}
}
}
}
// if reached here, state or action is bad and thus return false
return false
}
func handlerApi(webprint http.ResponseWriter, r *http.Request) {
fmt.Println("starting handlerLaser2")
queries := r.URL.Query()
fmt.Printf("queries = %q\n", queries)
// check if api token is valid
if APITOKEN != queries.Get("token") {
fmt.Println("ERROR: Invalid API Token", queries.Get("token"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid API Token")
return
}
if !checkIfDeviceExists(queries.Get("device")) {
fmt.Println("ERROR: Invalid device", queries.Get("device"))
fmt.Fprintf(webprint, "%s", "ERROR: Invalid Device")
return
}
if !checkActionAndState(queries.Get("action"), queries.Get("state")) {
fmt.Println("ERROR: Bad action or state", queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "%s", "ERROR: Bad action or state")
return
}
fmt.Printf("Device %s is valid\nAction %s is valid\nState %s is valid\n", queries.Get("device"), queries.Get("action"), queries.Get("state"))
fmt.Fprintf(webprint, "Device %s is valid\nAction %s is valid\nState %s is valid", queries.Get("device"), queries.Get("action"), queries.Get("state"))
var returnText = ""
/* switch strings.ToLower(queries.Get("action")) {
case "off":
returnText = Emojis["eehtick"] + " **LASER IS AVAILABLE TO USE**"
case "on":
returnText = Emojis["laseron"] + " **" + queries.Get("user") + " IS FIRING LASER, PEW PEW**"
case "override":
returnText = Emojis["eehboss"] + " **LASER BOSS MODE ENABLED**"
case "maintenanceon":
returnText = Emojis["maintenance"] + " **LASER IN MAINTENANCE MODE**"
default:
return
}
*/
var lookup = queries.Get("action") + queries.Get("state")
fmt.Println("lookup=", lookup)
returnText = Emojis[lookup] + " **" + strings.ToUpper(queries.Get("device")) + " " + strings.ToUpper(queries.Get("action")) + ":" + strings.ToUpper(queries.Get("state")) + "**"
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("Unable to create discord session!")
return
}
//fmt.Fprintf(webprint, "%s, %s", DeviceMap[queries.Get("device")].Channel, returnText)
dg.ChannelMessageSend(DeviceMap[queries.Get("device")].Channel, returnText)
fmt.Printf("qdevice=%s dchannel=%s\n", queries.Get("device"), DeviceMap[queries.Get("device")].Channel)
//dg.ChannelMessageSend("730836803556343808", returnText)
fmt.Println("returnText = ", returnText)
//dg.ChannelMessageSend("729631967905054764", returnText)
}
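// Editor's sketch of the client side of handlerApi: how a device might report
// a state change. "bot.local" is a placeholder hostname and the query values
// are assumed URL-safe; neither is part of the original project.
func reportState(device, action, state string) error {
	u := fmt.Sprintf("http://bot.local:%s/api?token=%s&device=%s&action=%s&state=%s",
		LISTENPORT, APITOKEN, device, action, state)
	resp, err := http.Get(u)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}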
| backlightold | identifier_name |
hkdf_test.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
use maplit::hashmap;
use serde::Deserialize;
use tink_core::Prf;
use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf};
use tink_proto::HashType;
struct Rfc5869Test {
hash: HashType,
key: &'static str,
salt: &'static str,
info: &'static str,
output_length: usize,
okm: &'static str,
}
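// For orientation, the construction these vectors exercise (RFC 5869):
//   PRK  = HMAC-Hash(salt, IKM)                      (extract)
//   T(1) = HMAC-Hash(PRK, info || 0x01)              (expand)
//   T(i) = HMAC-Hash(PRK, T(i-1) || info || i)
//   OKM  = first `output_length` bytes of T(1) || T(2) || ...
// which caps the output at 255 * HashLen bytes (exercised by a later test).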
#[test]
fn test_vectors_rfc5869() {
// Test vectors from RFC 5869.
let testvectors = [
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
salt: "",
info: "",
output_length: 42,
okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48",
},
];
for v in testvectors.iter() {
let key = hex::decode(v.key).expect("Could not decode key");
let salt = hex::decode(v.salt).expect("Could not decode salt");
let info = hex::decode(v.info).expect("Could not decode info");
let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object");
let output = p
.compute_prf(&info, v.output_length)
.expect("Error computing HKDF");
assert_eq!(
hex::encode(output),
v.okm,
"Computation and test vector differ."
);
}
}
#[derive(Debug, Deserialize)]
struct HkdfTestData {
#[serde(flatten)]
pub suite: tink_tests::WycheproofSuite,
#[serde(rename = "testGroups")]
pub test_groups: Vec<HkdfTestGroup>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestGroup {
#[serde(flatten)]
pub group: tink_tests::WycheproofGroup,
#[serde(rename = "keySize")]
pub key_size: u32,
pub tests: Vec<HkdfTestCase>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestCase {
#[serde(flatten)]
pub case: tink_tests::WycheproofCase,
#[serde(with = "tink_tests::hex_string")]
pub ikm: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub salt: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub info: Vec<u8>,
pub size: usize,
#[serde(with = "tink_tests::hex_string")]
pub okm: Vec<u8>,
}
#[test]
fn test_hkdf_prf_wycheproof_cases() {
for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] {
let hash_name = format!("{:?}", hash);
let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase());
println!("wycheproof file '{}' hash {}", filename, hash_name);
let bytes = tink_tests::wycheproof_data(&filename);
let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap();
for g in &data.test_groups {
println!(" key info: key_size={}", g.key_size);
for tc in &g.tests {
println!(
" case {} [{}] {}",
tc.case.case_id, tc.case.result, tc.case.comment
);
assert_eq!(tc.ikm.len() * 8, g.key_size as usize);
let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt);
let valid = tc.case.result == tink_tests::WycheproofResult::Valid;
if valid && hkdf_prf.is_err() {
panic!(
"Could not create HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
}
if !valid && hkdf_prf.is_err() {
continue;
}
let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) {
Err(_) => {
assert!(
!valid,
"Could not compute HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
continue;
}
Ok(r) => r,
};
if valid {
assert_eq!(
res, tc.okm,
"Computed HKDF {:?} PRF and expected for test case {} ({}) do not match",
hash, tc.case.case_id, tc.case.comment
);
} else {
assert_ne!(
res, tc.okm,
"Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match",
hash, tc.case.case_id, tc.case.comment
);
}
}
}
}
}
#[test]
fn test_hkdf_prf_hash() {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA256"
);
assert!(
HkdfPrf::new(
HashType::Sha512,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA512"
);
assert!(
HkdfPrf::new(
HashType::Sha1,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA1"
);
assert!(
HkdfPrf::new(
HashType::UnknownHash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_err(),
"Expected HkdfPrf::new to fail with unknown hash"
);
}
#[test]
fn test_hkdf_prf_salt() |
#[test]
fn test_hkdf_prf_output_length() {
let testdata = hashmap! {
HashType::Sha1 => 20,
HashType::Sha256 => 32,
HashType::Sha512 => 64,
};
for (hash, length) in testdata {
let prf = HkdfPrf::new(
hash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
0x0d, 0x0e, 0x0f, 0x10,
],
&[],
)
.unwrap_or_else(|_| {
panic!(
"Expected HkdfPrf::new to work on 32 byte key with hash {:?}",
hash
)
});
// If overflow checks are enabled (which they are by default for tests),
// this loop runs too slow, so only test every 10th length.
let stride: usize = if cfg!(overflow_checks) { 10 } else { 1 };
for i in (0..=(length * 255)).step_by(stride) {
let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| {
panic!(
"Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}",
hash, i, e
)
});
assert_eq!(
output.len(),
i,
"Expected HKDF {:?} PRF to compute {} bytes, got {}",
hash,
i,
output.len()
);
}
for i in (length * 255 + 1)..(length * 255 + 100) {
assert!(
prf.compute_prf(&[0x01, 0x02], i).is_err(),
"Expected to not be able to compute HKDF {:?} PRF with {} output length",
hash,
i
);
}
}
}
#[test]
fn test_validate_hkdf_prf_params() {
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(),
"Unexpected error for valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(),
"Unexpected error for salted valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(),
"Short key size not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(),
"Unknown hash function not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(),
"Weak hash function not detected for HKDF PRF params"
);
}
| {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work empty salt"
);
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[0xaf, 0xfe, 0xc0, 0xff, 0xee]
)
.is_ok(),
"Expected HkdfPrf::new to work with salt"
);
} | identifier_body |
hkdf_test.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
use maplit::hashmap;
use serde::Deserialize;
use tink_core::Prf;
use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf};
use tink_proto::HashType;
struct Rfc5869Test {
hash: HashType,
key: &'static str,
salt: &'static str,
info: &'static str,
output_length: usize,
okm: &'static str,
}
#[test]
fn test_vectors_rfc5869() {
// Test vectors from RFC 5869.
let testvectors = [
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
salt: "",
info: "",
output_length: 42,
okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48",
},
];
for v in testvectors.iter() {
let key = hex::decode(v.key).expect("Could not decode key");
let salt = hex::decode(v.salt).expect("Could not decode salt");
let info = hex::decode(v.info).expect("Could not decode info");
let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object");
let output = p
.compute_prf(&info, v.output_length)
.expect("Error computing HKDF");
assert_eq!(
hex::encode(output),
v.okm,
"Computation and test vector differ."
);
}
}
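// Usage sketch (illustrative, not part of the upstream test suite):
// callers normally program against the `tink_core::Prf` trait, so an
// `HkdfPrf` can sit behind a trait object. The key bytes, input, and
// output length below are placeholder values.
#[test]
fn test_hkdf_prf_behind_trait_object() {
    let prf: Box<dyn Prf> =
        Box::new(HkdfPrf::new(HashType::Sha256, &[0u8; 32], &[]).expect("valid HKDF key"));
    // Compute 16 bytes of PRF output for an arbitrary input.
    let output = prf.compute_prf(b"context", 16).expect("PRF computation failed");
    assert_eq!(output.len(), 16);
}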
#[derive(Debug, Deserialize)]
struct HkdfTestData {
#[serde(flatten)]
pub suite: tink_tests::WycheproofSuite,
#[serde(rename = "testGroups")]
pub test_groups: Vec<HkdfTestGroup>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestGroup {
#[serde(flatten)]
pub group: tink_tests::WycheproofGroup,
#[serde(rename = "keySize")]
pub key_size: u32,
pub tests: Vec<HkdfTestCase>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestCase {
#[serde(flatten)]
pub case: tink_tests::WycheproofCase,
#[serde(with = "tink_tests::hex_string")]
pub ikm: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub salt: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub info: Vec<u8>,
pub size: usize,
#[serde(with = "tink_tests::hex_string")]
pub okm: Vec<u8>,
}
#[test]
fn test_hkdf_prf_wycheproof_cases() {
for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] {
let hash_name = format!("{:?}", hash);
let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase());
println!("wycheproof file '{}' hash {}", filename, hash_name);
let bytes = tink_tests::wycheproof_data(&filename);
let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap();
for g in &data.test_groups {
println!(" key info: key_size={}", g.key_size);
for tc in &g.tests {
println!(
" case {} [{}] {}",
tc.case.case_id, tc.case.result, tc.case.comment
);
assert_eq!(tc.ikm.len() * 8, g.key_size as usize);
let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt);
let valid = tc.case.result == tink_tests::WycheproofResult::Valid;
if valid && hkdf_prf.is_err() {
panic!(
"Could not create HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
}
if !valid && hkdf_prf.is_err() {
continue;
}
let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) {
Err(_) => {
assert!(
!valid,
"Could not compute HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
continue;
}
Ok(r) => r,
};
if valid {
assert_eq!(
res, tc.okm,
"Computed HKDF {:?} PRF and expected for test case {} ({}) do not match",
hash, tc.case.case_id, tc.case.comment
);
} else {
assert_ne!(
res, tc.okm,
"Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match",
hash, tc.case.case_id, tc.case.comment
);
}
}
}
}
}
#[test]
fn test_hkdf_prf_hash() {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA256"
);
assert!(
HkdfPrf::new(
HashType::Sha512,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, | &[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA512"
);
assert!(
HkdfPrf::new(
HashType::Sha1,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA1"
);
assert!(
HkdfPrf::new(
HashType::UnknownHash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_err(),
"Expected HkdfPrf::new to fail with unknown hash"
);
}
#[test]
fn test_hkdf_prf_salt() {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work empty salt"
);
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[0xaf, 0xfe, 0xc0, 0xff, 0xee]
)
.is_ok(),
"Expected HkdfPrf::new to work with salt"
);
}
#[test]
fn test_hkdf_prf_output_length() {
let testdata = hashmap! {
HashType::Sha1 => 20,
HashType::Sha256 => 32,
HashType::Sha512 => 64,
};
for (hash, length) in testdata {
let prf = HkdfPrf::new(
hash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
0x0d, 0x0e, 0x0f, 0x10,
],
&[],
)
.unwrap_or_else(|_| {
panic!(
"Expected HkdfPrf::new to work on 32 byte key with hash {:?}",
hash
)
});
// If overflow checks are enabled (which they are by default for tests),
// this loop runs too slowly, so only test every 10th length.
let stride: usize = if cfg!(overflow_checks) { 10 } else { 1 };
for i in (0..=(length * 255)).step_by(stride) {
let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| {
panic!(
"Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}",
hash, i, e
)
});
assert_eq!(
output.len(),
i,
"Expected HKDF {:?} PRF to compute {} bytes, got {}",
hash,
i,
output.len()
);
}
for i in (length * 255 + 1)..(length * 255 + 100) {
assert!(
prf.compute_prf(&[0x01, 0x02], i).is_err(),
"Expected to not be able to compute HKDF {:?} PRF with {} output length",
hash,
i
);
}
}
}
#[test]
fn test_validate_hkdf_prf_params() {
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(),
"Unexpected error for valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(),
"Unexpected error for salted valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(),
"Short key size not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(),
"Unknown hash function not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(),
"Weak hash function not detected for HKDF PRF params"
);
} | 0x0f, 0x10
], | random_line_split |
hkdf_test.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
use maplit::hashmap;
use serde::Deserialize;
use tink_core::Prf;
use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf};
use tink_proto::HashType;
struct Rfc5869Test {
hash: HashType,
key: &'static str,
salt: &'static str,
info: &'static str,
output_length: usize,
okm: &'static str,
}
#[test]
fn test_vectors_rfc5869() {
// Test vectors from RFC 5869.
let testvectors = [
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
salt: "",
info: "",
output_length: 42,
okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48",
},
];
for v in testvectors.iter() {
let key = hex::decode(v.key).expect("Could not decode key");
let salt = hex::decode(v.salt).expect("Could not decode salt");
let info = hex::decode(v.info).expect("Could not decode info");
let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object");
let output = p
.compute_prf(&info, v.output_length)
.expect("Error computing HKDF");
assert_eq!(
hex::encode(output),
v.okm,
"Computation and test vector differ."
);
}
}
#[derive(Debug, Deserialize)]
struct HkdfTestData {
#[serde(flatten)]
pub suite: tink_tests::WycheproofSuite,
#[serde(rename = "testGroups")]
pub test_groups: Vec<HkdfTestGroup>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestGroup {
#[serde(flatten)]
pub group: tink_tests::WycheproofGroup,
#[serde(rename = "keySize")]
pub key_size: u32,
pub tests: Vec<HkdfTestCase>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestCase {
#[serde(flatten)]
pub case: tink_tests::WycheproofCase,
#[serde(with = "tink_tests::hex_string")]
pub ikm: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub salt: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub info: Vec<u8>,
pub size: usize,
#[serde(with = "tink_tests::hex_string")]
pub okm: Vec<u8>,
}
#[test]
fn test_hkdf_prf_wycheproof_cases() {
for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] {
let hash_name = format!("{:?}", hash);
let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase());
println!("wycheproof file '{}' hash {}", filename, hash_name);
let bytes = tink_tests::wycheproof_data(&filename);
let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap();
for g in &data.test_groups {
println!(" key info: key_size={}", g.key_size);
for tc in &g.tests {
println!(
" case {} [{}] {}",
tc.case.case_id, tc.case.result, tc.case.comment
);
assert_eq!(tc.ikm.len() * 8, g.key_size as usize);
let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt);
let valid = tc.case.result == tink_tests::WycheproofResult::Valid;
if valid && hkdf_prf.is_err() {
panic!(
"Could not create HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
}
if !valid && hkdf_prf.is_err() {
continue;
}
let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) {
Err(_) => {
assert!(
!valid,
"Could not compute HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
continue;
}
Ok(r) => r,
};
if valid | else {
assert_ne!(
res, tc.okm,
"Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match",
hash, tc.case.case_id, tc.case.comment
);
}
}
}
}
}
#[test]
fn test_hkdf_prf_hash() {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA256"
);
assert!(
HkdfPrf::new(
HashType::Sha512,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA512"
);
assert!(
HkdfPrf::new(
HashType::Sha1,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA1"
);
assert!(
HkdfPrf::new(
HashType::UnknownHash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_err(),
"Expected HkdfPrf::new to fail with unknown hash"
);
}
#[test]
fn test_hkdf_prf_salt() {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work empty salt"
);
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[0xaf, 0xfe, 0xc0, 0xff, 0xee]
)
.is_ok(),
"Expected HkdfPrf::new to work with salt"
);
}
#[test]
fn test_hkdf_prf_output_length() {
let testdata = hashmap! {
HashType::Sha1 => 20,
HashType::Sha256 => 32,
HashType::Sha512 => 64,
};
for (hash, length) in testdata {
let prf = HkdfPrf::new(
hash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
0x0d, 0x0e, 0x0f, 0x10,
],
&[],
)
.unwrap_or_else(|_| {
panic!(
"Expected HkdfPrf::new to work on 32 byte key with hash {:?}",
hash
)
});
// If overflow checks are enabled (which they are by default for tests),
// this loop runs too slowly, so only test every 10th length.
let stride: usize = if cfg!(overflow_checks) { 10 } else { 1 };
for i in (0..=(length * 255)).step_by(stride) {
let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| {
panic!(
"Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}",
hash, i, e
)
});
assert_eq!(
output.len(),
i,
"Expected HKDF {:?} PRF to compute {} bytes, got {}",
hash,
i,
output.len()
);
}
for i in (length * 255 + 1)..(length * 255 + 100) {
assert!(
prf.compute_prf(&[0x01, 0x02], i).is_err(),
"Expected to not be able to compute HKDF {:?} PRF with {} output length",
hash,
i
);
}
}
}
#[test]
fn test_validate_hkdf_prf_params() {
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(),
"Unexpected error for valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(),
"Unexpected error for salted valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(),
"Short key size not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(),
"Unknown hash function not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(),
"Weak hash function not detected for HKDF PRF params"
);
}
| {
assert_eq!(
res, tc.okm,
"Computed HKDF {:?} PRF and expected for test case {} ({}) do not match",
hash, tc.case.case_id, tc.case.comment
);
} | conditional_block |
hkdf_test.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
use maplit::hashmap;
use serde::Deserialize;
use tink_core::Prf;
use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf};
use tink_proto::HashType;
struct Rfc5869Test {
hash: HashType,
key: &'static str,
salt: &'static str,
info: &'static str,
output_length: usize,
okm: &'static str,
}
#[test]
fn test_vectors_rfc5869() {
// Test vectors from RFC 5869.
let testvectors = [
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87",
},
Rfc5869Test{
hash: HashType::Sha256,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b",
salt: "000102030405060708090a0b0c",
info: "f0f1f2f3f4f5f6f7f8f9",
output_length: 42,
okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
output_length: 82,
okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
salt: "",
info: "",
output_length: 42,
okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918",
},
Rfc5869Test{
hash: HashType::Sha1,
key: "0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
salt: "",
info: "",
output_length: 42,
okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48",
},
];
for v in testvectors.iter() {
let key = hex::decode(v.key).expect("Could not decode key");
let salt = hex::decode(v.salt).expect("Could not decode salt");
let info = hex::decode(v.info).expect("Could not decode info");
let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object");
let output = p
.compute_prf(&info, v.output_length)
.expect("Error computing HKDF");
assert_eq!(
hex::encode(output),
v.okm,
"Computation and test vector differ."
);
}
}
#[derive(Debug, Deserialize)]
struct HkdfTestData {
#[serde(flatten)]
pub suite: tink_tests::WycheproofSuite,
#[serde(rename = "testGroups")]
pub test_groups: Vec<HkdfTestGroup>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestGroup {
#[serde(flatten)]
pub group: tink_tests::WycheproofGroup,
#[serde(rename = "keySize")]
pub key_size: u32,
pub tests: Vec<HkdfTestCase>,
}
#[derive(Debug, Deserialize)]
struct HkdfTestCase {
#[serde(flatten)]
pub case: tink_tests::WycheproofCase,
#[serde(with = "tink_tests::hex_string")]
pub ikm: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub salt: Vec<u8>,
#[serde(with = "tink_tests::hex_string")]
pub info: Vec<u8>,
pub size: usize,
#[serde(with = "tink_tests::hex_string")]
pub okm: Vec<u8>,
}
#[test]
fn | () {
for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] {
let hash_name = format!("{:?}", hash);
let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase());
println!("wycheproof file '{}' hash {}", filename, hash_name);
let bytes = tink_tests::wycheproof_data(&filename);
let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap();
for g in &data.test_groups {
println!(" key info: key_size={}", g.key_size);
for tc in &g.tests {
println!(
" case {} [{}] {}",
tc.case.case_id, tc.case.result, tc.case.comment
);
assert_eq!(tc.ikm.len() * 8, g.key_size as usize);
let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt);
let valid = tc.case.result == tink_tests::WycheproofResult::Valid;
if valid && hkdf_prf.is_err() {
panic!(
"Could not create HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
}
if !valid && hkdf_prf.is_err() {
continue;
}
let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) {
Err(_) => {
assert!(
!valid,
"Could not compute HKDF {:?} PRF for test case {} ({})",
hash, tc.case.case_id, tc.case.comment
);
continue;
}
Ok(r) => r,
};
if valid {
assert_eq!(
res, tc.okm,
"Computed HKDF {:?} PRF and expected for test case {} ({}) do not match",
hash, tc.case.case_id, tc.case.comment
);
} else {
assert_ne!(
res, tc.okm,
"Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match",
hash, tc.case.case_id, tc.case.comment
);
}
}
}
}
}
#[test]
fn test_hkdf_prf_hash() {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA256"
);
assert!(
HkdfPrf::new(
HashType::Sha512,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA512"
);
assert!(
HkdfPrf::new(
HashType::Sha1,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work with SHA1"
);
assert!(
HkdfPrf::new(
HashType::UnknownHash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_err(),
"Expected HkdfPrf::new to fail with unknown hash"
);
}
#[test]
fn test_hkdf_prf_salt() {
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[]
)
.is_ok(),
"Expected HkdfPrf::new to work empty salt"
);
assert!(
HkdfPrf::new(
HashType::Sha256,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10
],
&[0xaf, 0xfe, 0xc0, 0xff, 0xee]
)
.is_ok(),
"Expected HkdfPrf::new to work with salt"
);
}
#[test]
fn test_hkdf_prf_output_length() {
let testdata = hashmap! {
HashType::Sha1 => 20,
HashType::Sha256 => 32,
HashType::Sha512 => 64,
};
for (hash, length) in testdata {
let prf = HkdfPrf::new(
hash,
&[
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
0x0d, 0x0e, 0x0f, 0x10,
],
&[],
)
.unwrap_or_else(|_| {
panic!(
"Expected HkdfPrf::new to work on 32 byte key with hash {:?}",
hash
)
});
// If overflow checks are enabled (which they are by default for tests),
// this loop runs too slowly, so only test every 10th length.
let stride: usize = if cfg!(overflow_checks) { 10 } else { 1 };
for i in (0..=(length * 255)).step_by(stride) {
let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| {
panic!(
"Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}",
hash, i, e
)
});
assert_eq!(
output.len(),
i,
"Expected HKDF {:?} PRF to compute {} bytes, got {}",
hash,
i,
output.len()
);
}
for i in (length * 255 + 1)..(length * 255 + 100) {
assert!(
prf.compute_prf(&[0x01, 0x02], i).is_err(),
"Expected to not be able to compute HKDF {:?} PRF with {} output length",
hash,
i
);
}
}
}
#[test]
fn test_validate_hkdf_prf_params() {
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(),
"Unexpected error for valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(),
"Unexpected error for salted valid HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(),
"Short key size not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(),
"Unknown hash function not detected for HKDF PRF params"
);
assert!(
validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(),
"Weak hash function not detected for HKDF PRF params"
);
}
| test_hkdf_prf_wycheproof_cases | identifier_name |
config.rs | //! Tendermint configuration file types (with serde parsers/serializers)
//!
//! This module contains types which correspond to the following config files:
//!
//! - `config.toml`: `config::TendermintConfig`
//! - `node_key.json`: `config::node_key::NodeKey`
//! - `priv_validator_key.json`: `config::priv_validator_key::PrivValidatorKey`
mod node_key;
mod priv_validator_key;
pub use self::{node_key::NodeKey, priv_validator_key::PrivValidatorKey};
use crate::{
abci::tag,
error::{Error, ErrorKind},
genesis::Genesis,
net, node, Moniker, Timeout,
};
use serde::{de, de::Error as _, ser, Deserialize, Serialize};
use std::{
collections::BTreeMap,
fmt, fs,
path::{Path, PathBuf},
str::FromStr,
};
/// Tendermint `config.toml` file
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TendermintConfig {
/// TCP or UNIX socket address of the ABCI application,
/// or the name of an ABCI application compiled in with the Tendermint binary.
pub proxy_app: net::Address,
/// A custom human readable name for this node
pub moniker: Moniker,
/// If this node is many blocks behind the tip of the chain, FastSync
/// allows it to catch up quickly by downloading blocks in parallel
/// and verifying their commits
pub fast_sync: bool,
/// Database backend: `leveldb | memdb | cleveldb`
pub db_backend: DbBackend,
/// Database directory
pub db_dir: PathBuf,
/// Output level for logging, including package level options
pub log_level: LogLevel,
/// Output format: 'plain' (colored text) or 'json'
pub log_format: LogFormat,
/// Path to the JSON file containing the initial validator set and other meta data
pub genesis_file: PathBuf,
/// Path to the JSON file containing the private key to use as a validator in the consensus protocol
pub priv_validator_key_file: Option<PathBuf>,
/// Path to the JSON file containing the last sign state of a validator
pub priv_validator_state_file: PathBuf,
/// TCP or UNIX socket address for Tendermint to listen on for
/// connections from an external PrivValidator process
#[serde(deserialize_with = "deserialize_optional_value")]
pub priv_validator_laddr: Option<net::Address>,
/// Path to the JSON file containing the private key to use for node authentication in the p2p protocol
pub node_key_file: PathBuf,
/// Mechanism to connect to the ABCI application: socket | grpc
pub abci: AbciMode,
/// TCP or UNIX socket address for the profiling server to listen on
#[serde(deserialize_with = "deserialize_optional_value")]
pub prof_laddr: Option<net::Address>,
/// If `true`, query the ABCI app on connecting to a new peer
/// so the app can decide if we should keep the connection or not
pub filter_peers: bool,
/// rpc server configuration options
pub rpc: RpcConfig,
/// peer to peer configuration options
pub p2p: P2PConfig,
/// mempool configuration options
pub mempool: MempoolConfig,
/// consensus configuration options
pub consensus: ConsensusConfig,
/// transactions indexer configuration options
pub tx_index: TxIndexConfig,
/// instrumentation configuration options
pub instrumentation: InstrumentationConfig,
}
impl TendermintConfig {
/// Parse Tendermint `config.toml`
pub fn parse_toml<T: AsRef<str>>(toml_string: T) -> Result<Self, Error> {
Ok(toml::from_str(toml_string.as_ref())?)
}
/// Load `config.toml` from a file
pub fn load_toml_file<P>(path: &P) -> Result<Self, Error>
where
P: AsRef<Path>,
{
let toml_string = fs::read_to_string(path).map_err(|e| {
err!(
ErrorKind::Parse,
"couldn't open {}: {}",
path.as_ref().display(),
e
)
})?;
Self::parse_toml(toml_string)
}
/// Load `genesis.json` file from the configured location
pub fn load_genesis_file(&self, home: impl AsRef<Path>) -> Result<Genesis, Error> {
let path = home.as_ref().join(&self.genesis_file);
let genesis_json = fs::read_to_string(&path)
.map_err(|e| err!(ErrorKind::Parse, "couldn't open {}: {}", path.display(), e))?;
Ok(serde_json::from_str(genesis_json.as_ref())?)
}
/// Load `node_key.json` file from the configured location
pub fn load_node_key(&self, home: impl AsRef<Path>) -> Result<NodeKey, Error> {
let path = home.as_ref().join(&self.node_key_file);
NodeKey::load_json_file(&path)
}
}
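// Usage sketch (illustrative only; the home-directory path is a
// placeholder): load a node's `config.toml`, then the genesis document
// and node key it points at.
#[allow(dead_code)]
fn example_load_node_config() -> Result<(), Error> {
    let home = std::path::Path::new("/home/validator/.tendermint");
    // Parse the main configuration file.
    let config = TendermintConfig::load_toml_file(&home.join("config/config.toml"))?;
    // The genesis and node-key paths inside the config are resolved
    // relative to the home directory.
    let _genesis = config.load_genesis_file(home)?;
    let _node_key = config.load_node_key(home)?;
    Ok(())
}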
/// Database backend
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum DbBackend {
/// LevelDB backend
#[serde(rename = "leveldb")]
LevelDb,
/// MemDB backend
#[serde(rename = "memdb")]
MemDb,
/// CLevelDB backend
#[serde(rename = "cleveldb")]
CLevelDb,
}
/// Loglevel configuration
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct LogLevel(BTreeMap<String, String>);
impl LogLevel {
/// Get the setting for the given key
pub fn get<S>(&self, key: S) -> Option<&str>
where
S: AsRef<str>,
{
self.0.get(key.as_ref()).map(AsRef::as_ref)
}
/// Iterate over the levels
pub fn iter(&self) -> LogLevelIter<'_> {
self.0.iter()
}
}
/// Iterator over log levels
pub type LogLevelIter<'a> = std::collections::btree_map::Iter<'a, String, String>;
impl FromStr for LogLevel {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut levels = BTreeMap::new();
for level in s.split(',') {
let parts = level.split(':').collect::<Vec<_>>();
if parts.len() != 2 {
return Err(err!(ErrorKind::Parse, "error parsing log level: {}", level));
}
let key = parts[0].to_owned();
let value = parts[1].to_owned();
if levels.insert(key, value).is_some() {
return Err(err!(
ErrorKind::Parse,
"duplicate log level setting for: {}",
level
));
}
}
Ok(LogLevel(levels))
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for (i, (k, v)) in self.0.iter().enumerate() {
write!(f, "{}:{}", k, v)?;
if i < self.0.len() - 1 {
write!(f, ",")?;
}
}
Ok(())
}
}
impl<'de> Deserialize<'de> for LogLevel {
fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let levels = String::deserialize(deserializer)?;
Ok(Self::from_str(&levels).map_err(|e| D::Error::custom(format!("{}", e)))?)
}
}
impl Serialize for LogLevel {
fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
self.to_string().serialize(serializer)
}
}
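// Round-trip sketch for the `module:level` list syntax handled above
// (illustrative; the module and level names are placeholders).
#[cfg(test)]
mod log_level_tests {
    use super::*;

    #[test]
    fn parses_and_displays_levels() {
        let level = LogLevel::from_str("main:info,state:debug").unwrap();
        assert_eq!(level.get("main"), Some("info"));
        assert_eq!(level.get("state"), Some("debug"));
        // `BTreeMap` keeps keys ordered, so `Display` output is deterministic.
        assert_eq!(level.to_string(), "main:info,state:debug");
        // Entries without a `key:value` shape are rejected.
        assert!(LogLevel::from_str("not-a-pair").is_err());
    }
}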
/// Logging format
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum LogFormat {
/// Plain (colored text)
#[serde(rename = "plain")]
Plain,
/// JSON
#[serde(rename = "json")]
Json,
}
/// Mechanism to connect to the ABCI application: socket | grpc
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum AbciMode {
/// Socket
#[serde(rename = "socket")]
Socket,
/// GRPC
#[serde(rename = "grpc")]
Grpc,
}
/// Tendermint `config.toml` file's `[rpc]` section
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct RpcConfig {
/// TCP or UNIX socket address for the RPC server to listen on
pub laddr: net::Address,
/// A list of origins a cross-domain request can be executed from
/// Default value `[]` disables CORS support
/// Use `["*"]` to allow any origin
pub cors_allowed_origins: Vec<CorsOrigin>,
/// A list of methods the client is allowed to use with cross-domain requests
pub cors_allowed_methods: Vec<CorsMethod>,
/// A list of non-simple headers the client is allowed to use with cross-domain requests
pub cors_allowed_headers: Vec<CorsHeader>,
/// TCP or UNIX socket address for the gRPC server to listen on
/// NOTE: This server only supports `/broadcast_tx_commit`
#[serde(deserialize_with = "deserialize_optional_value")]
pub grpc_laddr: Option<net::Address>,
/// Maximum number of simultaneous GRPC connections.
/// Does not include RPC (HTTP&WebSocket) connections. See `max_open_connections`.
pub grpc_max_open_connections: u64,
/// Activate unsafe RPC commands like `/dial_seeds` and `/unsafe_flush_mempool`
#[serde(rename = "unsafe")]
pub unsafe_commands: bool,
/// Maximum number of simultaneous connections (including WebSocket).
/// Does not include gRPC connections. See `grpc_max_open_connections`.
pub max_open_connections: u64,
/// Maximum number of unique clientIDs that can `/subscribe`.
pub max_subscription_clients: u64,
/// Maximum number of unique queries a given client can `/subscribe` to.
pub max_subscriptions_per_client: u64,
/// How long to wait for a tx to be committed during `/broadcast_tx_commit`.
pub timeout_broadcast_tx_commit: Timeout,
/// The name of a file containing a certificate that is used to create the HTTPS server.
#[serde(deserialize_with = "deserialize_optional_value")]
pub tls_cert_file: Option<PathBuf>,
/// The name of a file containing the matching private key that is used to create the HTTPS server.
#[serde(deserialize_with = "deserialize_optional_value")]
pub tls_key_file: Option<PathBuf>,
}
/// Origin hosts allowed with CORS requests to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsOrigin(String);
impl AsRef<str> for CorsOrigin {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsOrigin {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// HTTP methods allowed with CORS requests to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct | (String);
impl AsRef<str> for CorsMethod {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsMethod {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// HTTP headers allowed to be sent via CORS to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsHeader(String);
impl AsRef<str> for CorsHeader {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsHeader {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// peer to peer configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct P2PConfig {
/// Address to listen for incoming connections
pub laddr: net::Address,
/// Address to advertise to peers for them to dial
/// If empty, will use the same port as the laddr,
/// and will introspect on the listener or use UPnP
/// to figure out the address.
#[serde(deserialize_with = "deserialize_optional_value")]
pub external_address: Option<net::Address>,
/// Comma separated list of seed nodes to connect to
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub seeds: Vec<net::Address>,
/// Comma separated list of nodes to keep persistent connections to
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub persistent_peers: Vec<net::Address>,
/// UPnP port forwarding
pub upnp: bool,
/// Path to address book
pub addr_book_file: PathBuf,
/// Set `true` for strict address routability rules
/// Set `false` for private or local networks
pub addr_book_strict: bool,
/// Maximum number of inbound peers
pub max_num_inbound_peers: u64,
/// Maximum number of outbound peers to connect to, excluding persistent peers
pub max_num_outbound_peers: u64,
/// Time to wait before flushing messages out on the connection
pub flush_throttle_timeout: Timeout,
/// Maximum size of a message packet payload, in bytes
pub max_packet_msg_payload_size: u64,
/// Rate at which packets can be sent, in bytes/second
pub send_rate: TransferRate,
/// Rate at which packets can be received, in bytes/second
pub recv_rate: TransferRate,
/// Set `true` to enable the peer-exchange reactor
pub pex: bool,
/// Seed mode, in which the node constantly crawls the network and looks for
/// peers. If another node asks it for addresses, it responds and disconnects.
///
/// Does not work if the peer-exchange reactor is disabled.
pub seed_mode: bool,
/// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub private_peer_ids: Vec<node::Id>,
/// Toggle to disable the guard against peers connecting from the same IP.
pub allow_duplicate_ip: bool,
/// Handshake timeout
pub handshake_timeout: Timeout,
/// Timeout when dialing other peers
pub dial_timeout: Timeout,
}
/// mempool configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct MempoolConfig {
/// Recheck enabled
pub recheck: bool,
/// Broadcast enabled
pub broadcast: bool,
/// WAL dir
#[serde(deserialize_with = "deserialize_optional_value")]
pub wal_dir: Option<PathBuf>,
/// Maximum number of transactions in the mempool
pub size: u64,
/// Limit the total size of all txs in the mempool.
/// This only accounts for raw transactions (e.g. given 1MB transactions and
/// `max_txs_bytes`=5MB, the mempool will only accept 5 transactions).
pub max_txs_bytes: u64,
/// Size of the cache (used to filter transactions we saw earlier) in transactions
pub cache_size: u64,
}
/// consensus configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ConsensusConfig {
/// Path to WAL file
pub wal_file: PathBuf,
/// Propose timeout
pub timeout_propose: Timeout,
/// Propose timeout delta
pub timeout_propose_delta: Timeout,
/// Prevote timeout
pub timeout_prevote: Timeout,
/// Prevote timeout delta
pub timeout_prevote_delta: Timeout,
/// Precommit timeout
pub timeout_precommit: Timeout,
/// Precommit timeout delta
pub timeout_precommit_delta: Timeout,
/// Commit timeout
pub timeout_commit: Timeout,
/// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
pub skip_timeout_commit: bool,
/// EmptyBlocks mode
pub create_empty_blocks: bool,
/// Interval between empty blocks
pub create_empty_blocks_interval: Timeout,
/// Reactor sleep duration
pub peer_gossip_sleep_duration: Timeout,
/// Reactor query sleep duration
pub peer_query_maj23_sleep_duration: Timeout,
}
/// transactions indexer configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TxIndexConfig {
/// What indexer to use for transactions
#[serde(default)]
pub indexer: TxIndexer,
/// Comma-separated list of tags to index (by default the only tag is `tx.hash`)
// TODO(tarcieri): switch to `tendermint::abci::Tag`
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub index_tags: Vec<tag::Key>,
/// When set to true, tells indexer to index all tags (predefined tags:
/// `tx.hash`, `tx.height` and all tags from DeliverTx responses).
pub index_all_tags: bool,
}
/// What indexer to use for transactions
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TxIndexer {
/// "null"
// TODO(tarcieri): use an `Option` type here?
#[serde(rename = "null")]
Null,
/// "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
#[serde(rename = "kv")]
Kv,
}
impl Default for TxIndexer {
fn default() -> TxIndexer {
TxIndexer::Kv
}
}
/// instrumentation configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct InstrumentationConfig {
/// When `true`, Prometheus metrics are served under /metrics on
/// PrometheusListenAddr.
pub prometheus: bool,
/// Address to listen for Prometheus collector(s) connections
// TODO(tarcieri): parse to `tendermint::net::Addr`
pub prometheus_listen_addr: String,
/// Maximum number of simultaneous connections.
pub max_open_connections: u64,
/// Instrumentation namespace
pub namespace: String,
}
/// Rate at which bytes can be sent/received
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct TransferRate(u64);
impl TransferRate {
/// Get the transfer rate in bytes per second
pub fn bytes_per_sec(self) -> u64 {
self.0
}
}
/// Deserialize `Option<T: FromStr>` where an empty string indicates `None`
fn deserialize_optional_value<'de, D, T, E>(deserializer: D) -> Result<Option<T>, D::Error>
where
D: de::Deserializer<'de>,
T: FromStr<Err = E>,
E: fmt::Display,
{
let string = String::deserialize(deserializer)?;
if string.is_empty() {
return Ok(None);
}
string
.parse()
.map(Some)
.map_err(|e| D::Error::custom(format!("{}", e)))
}
/// Deserialize a comma separated list of types that impl `FromStr` as a `Vec`
fn deserialize_comma_separated_list<'de, D, T, E>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: de::Deserializer<'de>,
T: FromStr<Err = E>,
E: fmt::Display,
{
let mut result = vec![];
let string = String::deserialize(deserializer)?;
if string.is_empty() {
return Ok(result);
}
for item in string.split(',') {
result.push(
item.parse()
.map_err(|e| D::Error::custom(format!("{}", e)))?,
);
}
Ok(result)
}
/// Serialize a comma separated list of types that impl `ToString`
fn serialize_comma_separated_list<S, T>(list: &[T], serializer: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
T: ToString,
{
let str_list = list.iter().map(|addr| addr.to_string()).collect::<Vec<_>>();
str_list.join(",").serialize(serializer)
}
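// Behaviour sketch for the serde helpers above (illustrative only): an
// empty string stands in for "no value"/"no entries", matching how
// Tendermint writes empty settings into `config.toml`. The wrapper
// types and `u64` payloads are placeholders for the real field types.
#[cfg(test)]
mod serde_helper_tests {
    use super::*;

    #[derive(Deserialize)]
    struct ListWrapper {
        #[serde(deserialize_with = "deserialize_comma_separated_list")]
        items: Vec<u64>,
    }

    #[derive(Deserialize)]
    struct OptWrapper {
        #[serde(deserialize_with = "deserialize_optional_value")]
        value: Option<u64>,
    }

    #[test]
    fn empty_string_is_empty_list() {
        let w: ListWrapper = serde_json::from_str(r#"{"items": ""}"#).unwrap();
        assert!(w.items.is_empty());
    }

    #[test]
    fn comma_separated_entries_parse() {
        let w: ListWrapper = serde_json::from_str(r#"{"items": "1,2,3"}"#).unwrap();
        assert_eq!(w.items, vec![1, 2, 3]);
    }

    #[test]
    fn empty_string_is_none() {
        let w: OptWrapper = serde_json::from_str(r#"{"value": ""}"#).unwrap();
        assert!(w.value.is_none());
    }
}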
| CorsMethod | identifier_name |
config.rs | //! Tendermint configuration file types (with serde parsers/serializers)
//!
//! This module contains types which correspond to the following config files:
//!
//! - `config.toml`: `config::TendermintConfig`
//! - `node_key.json`: `config::node_key::NodeKey`
//! - `priv_validator_key.json`: `config::priv_validator_key::PrivValidatorKey`
mod node_key;
mod priv_validator_key;
pub use self::{node_key::NodeKey, priv_validator_key::PrivValidatorKey};
use crate::{
abci::tag,
error::{Error, ErrorKind},
genesis::Genesis,
net, node, Moniker, Timeout,
};
use serde::{de, de::Error as _, ser, Deserialize, Serialize};
use std::{
collections::BTreeMap,
fmt, fs,
path::{Path, PathBuf},
str::FromStr,
};
/// Tendermint `config.toml` file
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TendermintConfig {
/// TCP or UNIX socket address of the ABCI application,
/// or the name of an ABCI application compiled in with the Tendermint binary.
pub proxy_app: net::Address,
/// A custom human readable name for this node
pub moniker: Moniker,
/// If this node is many blocks behind the tip of the chain, FastSync
/// allows it to catch up quickly by downloading blocks in parallel
/// and verifying their commits
pub fast_sync: bool,
/// Database backend: `leveldb | memdb | cleveldb`
pub db_backend: DbBackend,
/// Database directory
pub db_dir: PathBuf,
/// Output level for logging, including package level options
pub log_level: LogLevel,
/// Output format: 'plain' (colored text) or 'json'
pub log_format: LogFormat,
/// Path to the JSON file containing the initial validator set and other meta data
pub genesis_file: PathBuf,
/// Path to the JSON file containing the private key to use as a validator in the consensus protocol
pub priv_validator_key_file: Option<PathBuf>,
/// Path to the JSON file containing the last sign state of a validator
pub priv_validator_state_file: PathBuf,
/// TCP or UNIX socket address for Tendermint to listen on for
/// connections from an external PrivValidator process
#[serde(deserialize_with = "deserialize_optional_value")]
pub priv_validator_laddr: Option<net::Address>,
/// Path to the JSON file containing the private key to use for node authentication in the p2p protocol
pub node_key_file: PathBuf,
/// Mechanism to connect to the ABCI application: socket | grpc
pub abci: AbciMode,
/// TCP or UNIX socket address for the profiling server to listen on
#[serde(deserialize_with = "deserialize_optional_value")]
pub prof_laddr: Option<net::Address>,
/// If `true`, query the ABCI app on connecting to a new peer
/// so the app can decide if we should keep the connection or not
pub filter_peers: bool,
/// rpc server configuration options
pub rpc: RpcConfig,
/// peer to peer configuration options
pub p2p: P2PConfig,
/// mempool configuration options
pub mempool: MempoolConfig,
/// consensus configuration options
pub consensus: ConsensusConfig,
/// transactions indexer configuration options
pub tx_index: TxIndexConfig,
/// instrumentation configuration options
pub instrumentation: InstrumentationConfig,
}
impl TendermintConfig {
/// Parse Tendermint `config.toml`
pub fn parse_toml<T: AsRef<str>>(toml_string: T) -> Result<Self, Error> {
Ok(toml::from_str(toml_string.as_ref())?)
}
/// Load `config.toml` from a file
pub fn load_toml_file<P>(path: &P) -> Result<Self, Error>
where
P: AsRef<Path>,
{
let toml_string = fs::read_to_string(path).map_err(|e| {
err!(
ErrorKind::Parse,
"couldn't open {}: {}",
path.as_ref().display(),
e
)
})?;
Self::parse_toml(toml_string)
}
/// Load `genesis.json` file from the configured location
pub fn load_genesis_file(&self, home: impl AsRef<Path>) -> Result<Genesis, Error> {
let path = home.as_ref().join(&self.genesis_file);
let genesis_json = fs::read_to_string(&path)
.map_err(|e| err!(ErrorKind::Parse, "couldn't open {}: {}", path.display(), e))?;
Ok(serde_json::from_str(genesis_json.as_ref())?)
}
/// Load `node_key.json` file from the configured location
pub fn load_node_key(&self, home: impl AsRef<Path>) -> Result<NodeKey, Error> {
let path = home.as_ref().join(&self.node_key_file);
NodeKey::load_json_file(&path)
}
}
/// Database backend
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum DbBackend {
/// LevelDB backend
#[serde(rename = "leveldb")]
LevelDb,
/// MemDB backend
#[serde(rename = "memdb")]
MemDb,
/// CLevelDB backend
#[serde(rename = "cleveldb")]
CLevelDb,
}
/// Loglevel configuration
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct LogLevel(BTreeMap<String, String>);
impl LogLevel {
/// Get the setting for the given key
pub fn get<S>(&self, key: S) -> Option<&str>
where
S: AsRef<str>,
{
self.0.get(key.as_ref()).map(AsRef::as_ref)
}
/// Iterate over the levels
pub fn iter(&self) -> LogLevelIter<'_> {
self.0.iter()
}
}
/// Iterator over log levels
pub type LogLevelIter<'a> = std::collections::btree_map::Iter<'a, String, String>;
impl FromStr for LogLevel {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut levels = BTreeMap::new();
for level in s.split(',') {
let parts = level.split(':').collect::<Vec<_>>();
if parts.len() != 2 |
let key = parts[0].to_owned();
let value = parts[1].to_owned();
if levels.insert(key, value).is_some() {
return Err(err!(
ErrorKind::Parse,
"duplicate log level setting for: {}",
level
));
}
}
Ok(LogLevel(levels))
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for (i, (k, v)) in self.0.iter().enumerate() {
write!(f, "{}:{}", k, v)?;
if i < self.0.len() - 1 {
write!(f, ",")?;
}
}
Ok(())
}
}
impl<'de> Deserialize<'de> for LogLevel {
fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let levels = String::deserialize(deserializer)?;
Ok(Self::from_str(&levels).map_err(|e| D::Error::custom(format!("{}", e)))?)
}
}
impl Serialize for LogLevel {
fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
self.to_string().serialize(serializer)
}
}
/// Logging format
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum LogFormat {
/// Plain (colored text)
#[serde(rename = "plain")]
Plain,
/// JSON
#[serde(rename = "json")]
Json,
}
/// Mechanism to connect to the ABCI application: socket | grpc
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum AbciMode {
/// Socket
#[serde(rename = "socket")]
Socket,
/// GRPC
#[serde(rename = "grpc")]
Grpc,
}
/// Tendermint `config.toml` file's `[rpc]` section
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct RpcConfig {
/// TCP or UNIX socket address for the RPC server to listen on
pub laddr: net::Address,
/// A list of origins a cross-domain request can be executed from
/// Default value `[]` disables CORS support
/// Use `["*"]` to allow any origin
pub cors_allowed_origins: Vec<CorsOrigin>,
/// A list of methods the client is allowed to use with cross-domain requests
pub cors_allowed_methods: Vec<CorsMethod>,
/// A list of non-simple headers the client is allowed to use with cross-domain requests
pub cors_allowed_headers: Vec<CorsHeader>,
/// TCP or UNIX socket address for the gRPC server to listen on
/// NOTE: This server only supports `/broadcast_tx_commit`
#[serde(deserialize_with = "deserialize_optional_value")]
pub grpc_laddr: Option<net::Address>,
/// Maximum number of simultaneous GRPC connections.
/// Does not include RPC (HTTP&WebSocket) connections. See `max_open_connections`.
pub grpc_max_open_connections: u64,
/// Activate unsafe RPC commands like `/dial_seeds` and `/unsafe_flush_mempool`
#[serde(rename = "unsafe")]
pub unsafe_commands: bool,
/// Maximum number of simultaneous connections (including WebSocket).
/// Does not include gRPC connections. See `grpc_max_open_connections`.
pub max_open_connections: u64,
/// Maximum number of unique clientIDs that can `/subscribe`.
pub max_subscription_clients: u64,
/// Maximum number of unique queries a given client can `/subscribe` to.
pub max_subscriptions_per_client: u64,
/// How long to wait for a tx to be committed during `/broadcast_tx_commit`.
pub timeout_broadcast_tx_commit: Timeout,
/// The name of a file containing a certificate that is used to create the HTTPS server.
#[serde(deserialize_with = "deserialize_optional_value")]
pub tls_cert_file: Option<PathBuf>,
/// The name of a file containing the matching private key that is used to create the HTTPS server.
#[serde(deserialize_with = "deserialize_optional_value")]
pub tls_key_file: Option<PathBuf>,
}
/// Origin hosts allowed with CORS requests to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsOrigin(String);
impl AsRef<str> for CorsOrigin {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsOrigin {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// HTTP methods allowed with CORS requests to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsMethod(String);
impl AsRef<str> for CorsMethod {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsMethod {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// HTTP headers allowed to be sent via CORS to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsHeader(String);
impl AsRef<str> for CorsHeader {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsHeader {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// peer to peer configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct P2PConfig {
/// Address to listen for incoming connections
pub laddr: net::Address,
/// Address to advertise to peers for them to dial
/// If empty, will use the same port as the laddr,
/// and will introspect on the listener or use UPnP
/// to figure out the address.
#[serde(deserialize_with = "deserialize_optional_value")]
pub external_address: Option<net::Address>,
/// Comma separated list of seed nodes to connect to
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub seeds: Vec<net::Address>,
/// Comma separated list of nodes to keep persistent connections to
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub persistent_peers: Vec<net::Address>,
/// UPnP port forwarding
pub upnp: bool,
/// Path to address book
pub addr_book_file: PathBuf,
/// Set `true` for strict address routability rules
/// Set `false` for private or local networks
pub addr_book_strict: bool,
/// Maximum number of inbound peers
pub max_num_inbound_peers: u64,
/// Maximum number of outbound peers to connect to, excluding persistent peers
pub max_num_outbound_peers: u64,
/// Time to wait before flushing messages out on the connection
pub flush_throttle_timeout: Timeout,
/// Maximum size of a message packet payload, in bytes
pub max_packet_msg_payload_size: u64,
/// Rate at which packets can be sent, in bytes/second
pub send_rate: TransferRate,
/// Rate at which packets can be received, in bytes/second
pub recv_rate: TransferRate,
/// Set `true` to enable the peer-exchange reactor
pub pex: bool,
/// Seed mode, in which node constantly crawls the network and looks for
/// peers. If another node asks it for addresses, it responds and disconnects.
///
/// Does not work if the peer-exchange reactor is disabled.
pub seed_mode: bool,
/// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub private_peer_ids: Vec<node::Id>,
/// Toggle to disable the guard against peers connecting from the same IP.
pub allow_duplicate_ip: bool,
/// Handshake timeout
pub handshake_timeout: Timeout,
/// Timeout when dialing other peers
pub dial_timeout: Timeout,
}
/// mempool configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct MempoolConfig {
/// Recheck enabled
pub recheck: bool,
/// Broadcast enabled
pub broadcast: bool,
/// WAL dir
#[serde(deserialize_with = "deserialize_optional_value")]
pub wal_dir: Option<PathBuf>,
/// Maximum number of transactions in the mempool
pub size: u64,
/// Limit the total size of all txs in the mempool.
/// This only accounts for raw transactions (e.g. given 1MB transactions and
/// `max_txs_bytes`=5MB, mempool will only accept 5 transactions).
pub max_txs_bytes: u64,
/// Size of the cache (used to filter transactions we saw earlier) in transactions
pub cache_size: u64,
}
/// consensus configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ConsensusConfig {
/// Path to WAL file
pub wal_file: PathBuf,
/// Propose timeout
pub timeout_propose: Timeout,
/// Propose timeout delta
pub timeout_propose_delta: Timeout,
/// Prevote timeout
pub timeout_prevote: Timeout,
/// Prevote timeout delta
pub timeout_prevote_delta: Timeout,
/// Precommit timeout
pub timeout_precommit: Timeout,
/// Precommit timeout delta
pub timeout_precommit_delta: Timeout,
/// Commit timeout
pub timeout_commit: Timeout,
/// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
pub skip_timeout_commit: bool,
/// EmptyBlocks mode
pub create_empty_blocks: bool,
/// Interval between empty blocks
pub create_empty_blocks_interval: Timeout,
/// Reactor sleep duration
pub peer_gossip_sleep_duration: Timeout,
/// Reactor query sleep duration
pub peer_query_maj23_sleep_duration: Timeout,
}
/// transactions indexer configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TxIndexConfig {
/// What indexer to use for transactions
#[serde(default)]
pub indexer: TxIndexer,
/// Comma-separated list of tags to index (by default the only tag is `tx.hash`)
// TODO(tarcieri): switch to `tendermint::abci::Tag`
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub index_tags: Vec<tag::Key>,
/// When set to true, tells indexer to index all tags (predefined tags:
/// `tx.hash`, `tx.height` and all tags from DeliverTx responses).
pub index_all_tags: bool,
}
/// What indexer to use for transactions
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TxIndexer {
/// "null"
// TODO(tarcieri): use an `Option` type here?
#[serde(rename = "null")]
Null,
/// "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
#[serde(rename = "kv")]
Kv,
}
impl Default for TxIndexer {
fn default() -> TxIndexer {
TxIndexer::Kv
}
}
/// instrumentation configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct InstrumentationConfig {
/// When `true`, Prometheus metrics are served under /metrics on
/// PrometheusListenAddr.
pub prometheus: bool,
/// Address to listen for Prometheus collector(s) connections
// TODO(tarcieri): parse to `tendermint::net::Addr`
pub prometheus_listen_addr: String,
/// Maximum number of simultaneous connections.
pub max_open_connections: u64,
/// Instrumentation namespace
pub namespace: String,
}
/// Rate at which bytes can be sent/received
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct TransferRate(u64);
impl TransferRate {
/// Get the transfer rate in bytes per second
pub fn bytes_per_sec(self) -> u64 {
self.0
}
}
/// Deserialize `Option<T: FromStr>` where an empty string indicates `None`
fn deserialize_optional_value<'de, D, T, E>(deserializer: D) -> Result<Option<T>, D::Error>
where
D: de::Deserializer<'de>,
T: FromStr<Err = E>,
E: fmt::Display,
{
let string = String::deserialize(deserializer)?;
if string.is_empty() {
return Ok(None);
}
string
.parse()
.map(Some)
.map_err(|e| D::Error::custom(format!("{}", e)))
}
/// Deserialize a comma separated list of types that impl `FromStr` as a `Vec`
fn deserialize_comma_separated_list<'de, D, T, E>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: de::Deserializer<'de>,
T: FromStr<Err = E>,
E: fmt::Display,
{
let mut result = vec![];
let string = String::deserialize(deserializer)?;
if string.is_empty() {
return Ok(result);
}
for item in string.split(',') {
result.push(
item.parse()
.map_err(|e| D::Error::custom(format!("{}", e)))?,
);
}
Ok(result)
}
/// Serialize a comma separated list of types that impl `ToString`
fn serialize_comma_separated_list<S, T>(list: &[T], serializer: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
T: ToString,
{
let str_list = list.iter().map(|addr| addr.to_string()).collect::<Vec<_>>();
str_list.join(",").serialize(serializer)
}
| {
return Err(err!(ErrorKind::Parse, "error parsing log level: {}", level));
} | conditional_block |
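// --- Editor's sketch (not from the source): exercising the serde helpers
// defined above. The `Demo` struct and the `toml` crate are hypothetical
// choices for illustration; the helpers are private to their module, so a
// snippet like this would have to live alongside them.
#[derive(Debug, Deserialize, Serialize)]
struct Demo {
    #[serde(
        serialize_with = "serialize_comma_separated_list",
        deserialize_with = "deserialize_comma_separated_list"
    )]
    seeds: Vec<String>,
    #[serde(deserialize_with = "deserialize_optional_value")]
    wal_dir: Option<String>,
}

fn demo_round_trip() {
    // An empty string maps to `None`; "a,b" splits into a two-element Vec.
    let demo: Demo = toml::from_str("seeds = \"a,b\"\nwal_dir = \"\"").unwrap();
    assert_eq!(demo.seeds, vec!["a".to_string(), "b".to_string()]);
    assert!(demo.wal_dir.is_none());
}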
config.rs | //! Tendermint configuration file types (with serde parsers/serializers)
//!
//! This module contains types which correspond to the following config files:
//!
//! - `config.toml`: `config::TendermintConfig`
//! - `node_key.rs`: `config::node_key::NodeKey`
//! - `priv_validator_key.rs`: `config::priv_validator_key::PrivValidatorKey`
mod node_key;
mod priv_validator_key;
pub use self::{node_key::NodeKey, priv_validator_key::PrivValidatorKey};
use crate::{
abci::tag,
error::{Error, ErrorKind},
genesis::Genesis,
net, node, Moniker, Timeout,
};
use serde::{de, de::Error as _, ser, Deserialize, Serialize};
use std::{
collections::BTreeMap,
fmt, fs,
path::{Path, PathBuf},
str::FromStr,
};
/// Tendermint `config.toml` file
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TendermintConfig {
/// TCP or UNIX socket address of the ABCI application,
/// or the name of an ABCI application compiled in with the Tendermint binary.
pub proxy_app: net::Address,
/// A custom human-readable name for this node
pub moniker: Moniker,
/// If this node is many blocks behind the tip of the chain, FastSync
/// allows it to catch up quickly by downloading blocks in parallel
/// and verifying their commits
pub fast_sync: bool,
/// Database backend: `leveldb | memdb | cleveldb`
pub db_backend: DbBackend,
/// Database directory
pub db_dir: PathBuf,
/// Output level for logging, including package level options
pub log_level: LogLevel,
/// Output format: 'plain' (colored text) or 'json'
pub log_format: LogFormat,
/// Path to the JSON file containing the initial validator set and other metadata
pub genesis_file: PathBuf,
/// Path to the JSON file containing the private key to use as a validator in the consensus protocol
pub priv_validator_key_file: Option<PathBuf>,
/// Path to the JSON file containing the last sign state of a validator
pub priv_validator_state_file: PathBuf,
/// TCP or UNIX socket address for Tendermint to listen on for
/// connections from an external PrivValidator process
#[serde(deserialize_with = "deserialize_optional_value")]
pub priv_validator_laddr: Option<net::Address>,
/// Path to the JSON file containing the private key to use for node authentication in the p2p protocol
pub node_key_file: PathBuf,
/// Mechanism to connect to the ABCI application: socket | grpc
pub abci: AbciMode,
/// TCP or UNIX socket address for the profiling server to listen on
#[serde(deserialize_with = "deserialize_optional_value")]
pub prof_laddr: Option<net::Address>,
/// If `true`, query the ABCI app on connecting to a new peer
/// so the app can decide if we should keep the connection or not
pub filter_peers: bool,
/// rpc server configuration options
pub rpc: RpcConfig,
/// peer to peer configuration options
pub p2p: P2PConfig,
/// mempool configuration options
pub mempool: MempoolConfig,
/// consensus configuration options
pub consensus: ConsensusConfig,
/// transactions indexer configuration options
pub tx_index: TxIndexConfig,
/// instrumentation configuration options
pub instrumentation: InstrumentationConfig,
}
impl TendermintConfig {
/// Parse Tendermint `config.toml`
pub fn parse_toml<T: AsRef<str>>(toml_string: T) -> Result<Self, Error> {
Ok(toml::from_str(toml_string.as_ref())?)
}
/// Load `config.toml` from a file
pub fn load_toml_file<P>(path: &P) -> Result<Self, Error>
where
P: AsRef<Path>,
{
let toml_string = fs::read_to_string(path).map_err(|e| {
err!(
ErrorKind::Parse,
"couldn't open {}: {}",
path.as_ref().display(),
e
)
})?;
Self::parse_toml(toml_string)
}
/// Load `genesis.json` file from the configured location
pub fn load_genesis_file(&self, home: impl AsRef<Path>) -> Result<Genesis, Error> {
let path = home.as_ref().join(&self.genesis_file);
let genesis_json = fs::read_to_string(&path)
.map_err(|e| err!(ErrorKind::Parse, "couldn't open {}: {}", path.display(), e))?;
Ok(serde_json::from_str(genesis_json.as_ref())?)
}
/// Load `node_key.json` file from the configured location
pub fn load_node_key(&self, home: impl AsRef<Path>) -> Result<NodeKey, Error> {
let path = home.as_ref().join(&self.node_key_file);
NodeKey::load_json_file(&path)
}
}
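// --- Editor's sketch (not from the source): the loading flow implied by the
// accessors above. `home` is a hypothetical Tendermint home directory;
// deserialization succeeds only if config.toml supplies every non-`Option`
// field of `TendermintConfig`.
fn load_config_example(home: &Path) -> Result<TendermintConfig, Error> {
    let config = TendermintConfig::load_toml_file(&home.join("config.toml"))?;
    let _genesis = config.load_genesis_file(home)?;
    let _node_key = config.load_node_key(home)?;
    Ok(config)
}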
/// Database backend
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum DbBackend {
/// LevelDB backend
#[serde(rename = "leveldb")]
LevelDb,
/// MemDB backend
#[serde(rename = "memdb")]
MemDb,
/// CLevelDB backend
#[serde(rename = "cleveldb")]
CLevelDb,
}
/// Log level configuration
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct LogLevel(BTreeMap<String, String>);
impl LogLevel {
/// Get the setting for the given key
pub fn get<S>(&self, key: S) -> Option<&str>
where
S: AsRef<str>,
{
self.0.get(key.as_ref()).map(AsRef::as_ref)
}
/// Iterate over the levels
pub fn iter(&self) -> LogLevelIter<'_> {
self.0.iter()
}
}
/// Iterator over log levels
pub type LogLevelIter<'a> = std::collections::btree_map::Iter<'a, String, String>;
impl FromStr for LogLevel {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut levels = BTreeMap::new();
for level in s.split(',') {
let parts = level.split(':').collect::<Vec<_>>();
if parts.len() != 2 {
return Err(err!(ErrorKind::Parse, "error parsing log level: {}", level));
}
let key = parts[0].to_owned();
let value = parts[1].to_owned();
if levels.insert(key, value).is_some() {
return Err(err!(
ErrorKind::Parse,
"duplicate log level setting for: {}",
level
));
}
}
Ok(LogLevel(levels))
}
}
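// --- Editor's sketch (not from the source): `LogLevel` parses a
// comma-separated list of `<module>:<level>` pairs and rejects duplicate
// modules; since it stores them in a `BTreeMap`, `Display` re-emits the
// pairs in key order.
fn log_level_example() -> Result<(), Error> {
    let level = LogLevel::from_str("main:info,p2p:debug")?;
    assert_eq!(level.get("p2p"), Some("debug"));
    assert_eq!(level.to_string(), "main:info,p2p:debug");
    assert!(LogLevel::from_str("p2p:debug,p2p:error").is_err());
    Ok(())
}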
impl fmt::Display for LogLevel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for (i, (k, v)) in self.0.iter().enumerate() {
write!(f, "{}:{}", k, v)?;
if i < self.0.len() - 1 {
write!(f, ",")?;
}
}
Ok(())
}
}
impl<'de> Deserialize<'de> for LogLevel {
fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let levels = String::deserialize(deserializer)?;
Ok(Self::from_str(&levels).map_err(|e| D::Error::custom(format!("{}", e)))?)
}
}
impl Serialize for LogLevel {
fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
self.to_string().serialize(serializer)
}
}
/// Logging format
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum LogFormat {
/// Plain (colored text)
#[serde(rename = "plain")] | }
/// Mechanism to connect to the ABCI application: socket | grpc
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum AbciMode {
/// Socket
#[serde(rename = "socket")]
Socket,
/// GRPC
#[serde(rename = "grpc")]
Grpc,
}
/// Tendermint `config.toml` file's `[rpc]` section
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct RpcConfig {
/// TCP or UNIX socket address for the RPC server to listen on
pub laddr: net::Address,
/// A list of origins a cross-domain request can be executed from
/// Default value `[]` disables CORS support
/// Use `["*"]` to allow any origin
pub cors_allowed_origins: Vec<CorsOrigin>,
/// A list of methods the client is allowed to use with cross-domain requests
pub cors_allowed_methods: Vec<CorsMethod>,
/// A list of non-simple headers the client is allowed to use with cross-domain requests
pub cors_allowed_headers: Vec<CorsHeader>,
/// TCP or UNIX socket address for the gRPC server to listen on
/// NOTE: This server only supports `/broadcast_tx_commit`
#[serde(deserialize_with = "deserialize_optional_value")]
pub grpc_laddr: Option<net::Address>,
/// Maximum number of simultaneous GRPC connections.
/// Does not include RPC (HTTP&WebSocket) connections. See `max_open_connections`.
pub grpc_max_open_connections: u64,
/// Activate unsafe RPC commands like `/dial_seeds` and `/unsafe_flush_mempool`
#[serde(rename = "unsafe")]
pub unsafe_commands: bool,
/// Maximum number of simultaneous connections (including WebSocket).
/// Does not include gRPC connections. See `grpc_max_open_connections`.
pub max_open_connections: u64,
/// Maximum number of unique clientIDs that can `/subscribe`.
pub max_subscription_clients: u64,
/// Maximum number of unique queries a given client can `/subscribe` to.
pub max_subscriptions_per_client: u64,
/// How long to wait for a tx to be committed during `/broadcast_tx_commit`.
pub timeout_broadcast_tx_commit: Timeout,
/// The name of a file containing the certificate used to create the HTTPS server.
#[serde(deserialize_with = "deserialize_optional_value")]
pub tls_cert_file: Option<PathBuf>,
/// The name of a file containing the matching private key used to create the HTTPS server.
#[serde(deserialize_with = "deserialize_optional_value")]
pub tls_key_file: Option<PathBuf>,
}
/// Origin hosts allowed with CORS requests to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsOrigin(String);
impl AsRef<str> for CorsOrigin {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsOrigin {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// HTTP methods allowed with CORS requests to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsMethod(String);
impl AsRef<str> for CorsMethod {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsMethod {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// HTTP headers allowed to be sent via CORS to the RPC API
// TODO(tarcieri): parse and validate this string
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CorsHeader(String);
impl AsRef<str> for CorsHeader {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for CorsHeader {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
/// peer to peer configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct P2PConfig {
/// Address to listen for incoming connections
pub laddr: net::Address,
/// Address to advertise to peers for them to dial
/// If empty, will use the same port as the laddr,
/// and will introspect on the listener or use UPnP
/// to figure out the address.
#[serde(deserialize_with = "deserialize_optional_value")]
pub external_address: Option<net::Address>,
/// Comma separated list of seed nodes to connect to
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub seeds: Vec<net::Address>,
/// Comma separated list of nodes to keep persistent connections to
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub persistent_peers: Vec<net::Address>,
/// UPnP port forwarding
pub upnp: bool,
/// Path to address book
pub addr_book_file: PathBuf,
/// Set `true` for strict address routability rules
/// Set `false` for private or local networks
pub addr_book_strict: bool,
/// Maximum number of inbound peers
pub max_num_inbound_peers: u64,
/// Maximum number of outbound peers to connect to, excluding persistent peers
pub max_num_outbound_peers: u64,
/// Time to wait before flushing messages out on the connection
pub flush_throttle_timeout: Timeout,
/// Maximum size of a message packet payload, in bytes
pub max_packet_msg_payload_size: u64,
/// Rate at which packets can be sent, in bytes/second
pub send_rate: TransferRate,
/// Rate at which packets can be received, in bytes/second
pub recv_rate: TransferRate,
/// Set `true` to enable the peer-exchange reactor
pub pex: bool,
/// Seed mode, in which node constantly crawls the network and looks for
/// peers. If another node asks it for addresses, it responds and disconnects.
///
/// Does not work if the peer-exchange reactor is disabled.
pub seed_mode: bool,
/// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub private_peer_ids: Vec<node::Id>,
/// Toggle to disable the guard against peers connecting from the same IP.
pub allow_duplicate_ip: bool,
/// Handshake timeout
pub handshake_timeout: Timeout,
/// Timeout when dialing other peers
pub dial_timeout: Timeout,
}
/// mempool configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct MempoolConfig {
/// Recheck enabled
pub recheck: bool,
/// Broadcast enabled
pub broadcast: bool,
/// WAL dir
#[serde(deserialize_with = "deserialize_optional_value")]
pub wal_dir: Option<PathBuf>,
/// Maximum number of transactions in the mempool
pub size: u64,
/// Limit the total size of all txs in the mempool.
/// This only accounts for raw transactions (e.g. given 1MB transactions and
/// `max_txs_bytes`=5MB, mempool will only accept 5 transactions).
pub max_txs_bytes: u64,
/// Size of the cache (used to filter transactions we saw earlier) in transactions
pub cache_size: u64,
}
/// consensus configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ConsensusConfig {
/// Path to WAL file
pub wal_file: PathBuf,
/// Propose timeout
pub timeout_propose: Timeout,
/// Propose timeout delta
pub timeout_propose_delta: Timeout,
/// Prevote timeout
pub timeout_prevote: Timeout,
/// Prevote timeout delta
pub timeout_prevote_delta: Timeout,
/// Precommit timeout
pub timeout_precommit: Timeout,
/// Precommit timeout delta
pub timeout_precommit_delta: Timeout,
/// Commit timeout
pub timeout_commit: Timeout,
/// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
pub skip_timeout_commit: bool,
/// EmptyBlocks mode
pub create_empty_blocks: bool,
/// Interval between empty blocks
pub create_empty_blocks_interval: Timeout,
/// Reactor sleep duration
pub peer_gossip_sleep_duration: Timeout,
/// Reactor query sleep duration
pub peer_query_maj23_sleep_duration: Timeout,
}
/// transactions indexer configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TxIndexConfig {
/// What indexer to use for transactions
#[serde(default)]
pub indexer: TxIndexer,
/// Comma-separated list of tags to index (by default the only tag is `tx.hash`)
// TODO(tarcieri): switch to `tendermint::abci::Tag`
#[serde(
serialize_with = "serialize_comma_separated_list",
deserialize_with = "deserialize_comma_separated_list"
)]
pub index_tags: Vec<tag::Key>,
/// When set to true, tells indexer to index all tags (predefined tags:
/// `tx.hash`, `tx.height` and all tags from DeliverTx responses).
pub index_all_tags: bool,
}
/// What indexer to use for transactions
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TxIndexer {
/// "null"
// TODO(tarcieri): use an `Option` type here?
#[serde(rename = "null")]
Null,
/// "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
#[serde(rename = "kv")]
Kv,
}
impl Default for TxIndexer {
fn default() -> TxIndexer {
TxIndexer::Kv
}
}
/// instrumentation configuration options
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct InstrumentationConfig {
/// When `true`, Prometheus metrics are served under /metrics on
/// PrometheusListenAddr.
pub prometheus: bool,
/// Address to listen for Prometheus collector(s) connections
// TODO(tarcieri): parse to `tendermint::net::Addr`
pub prometheus_listen_addr: String,
/// Maximum number of simultaneous connections.
pub max_open_connections: u64,
/// Instrumentation namespace
pub namespace: String,
}
/// Rate at which bytes can be sent/received
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct TransferRate(u64);
impl TransferRate {
/// Get the transfer rate in bytes per second
pub fn bytes_per_sec(self) -> u64 {
self.0
}
}
/// Deserialize `Option<T: FromStr>` where an empty string indicates `None`
fn deserialize_optional_value<'de, D, T, E>(deserializer: D) -> Result<Option<T>, D::Error>
where
D: de::Deserializer<'de>,
T: FromStr<Err = E>,
E: fmt::Display,
{
let string = String::deserialize(deserializer)?;
if string.is_empty() {
return Ok(None);
}
string
.parse()
.map(Some)
.map_err(|e| D::Error::custom(format!("{}", e)))
}
/// Deserialize a comma separated list of types that impl `FromStr` as a `Vec`
fn deserialize_comma_separated_list<'de, D, T, E>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: de::Deserializer<'de>,
T: FromStr<Err = E>,
E: fmt::Display,
{
let mut result = vec![];
let string = String::deserialize(deserializer)?;
if string.is_empty() {
return Ok(result);
}
for item in string.split(',') {
result.push(
item.parse()
.map_err(|e| D::Error::custom(format!("{}", e)))?,
);
}
Ok(result)
}
/// Serialize a comma separated list of types that impl `ToString`
fn serialize_comma_separated_list<S, T>(list: &[T], serializer: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
T: ToString,
{
let str_list = list.iter().map(|addr| addr.to_string()).collect::<Vec<_>>();
str_list.join(",").serialize(serializer)
} | Plain,
/// JSON
#[serde(rename = "json")]
Json, | random_line_split |
wvc_data.py | import torch
import torch.utils.data as data
from PIL import Image
import logging, os
import numpy as np
import lmdb
import cv2
from torchvision import transforms
import random
import itertools
from webvision import config as wv_config
from torchvision.transforms import functional as trans_func
_logger = logging.getLogger(__name__)
class WebVision(data.Dataset):
def __init__(self, db_info, split='train', transform=None, jigsaw=False, frac=None, subset=None):
# Load data
self.split = split
self.img_ids = db_info[db_info.type == split].image_id.values.astype(np.str)
self.img_files = db_info[db_info.type == split].image_path.values.astype(np.str)
self.img_labels = db_info[db_info.type == split].label.values.astype(np.long)
self.transform = transform
self.jigsaw = jigsaw
assert len(self.img_ids) == len(self.img_files)
assert len(self.img_ids) == len(self.img_labels)
if subset is not None:
subset_idx = np.isin(self.img_ids, subset)
self.img_ids = self.img_ids[subset_idx]
self.img_files = self.img_files[subset_idx]
self.img_labels = self.img_labels[subset_idx]
assert np.unique(self.img_labels).size == 5000
_logger.info("Selecting subset of {} images".format(len(subset)))
# compute fraction of the dataset
if frac is not None:
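            # NOTE (editor): the line below reassigns `frac` to a hard-coded 0.01,
            # so the caller-supplied `frac` value is effectively ignored.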
sampled_idxs, frac = [], 0.01
for k in np.unique(self.img_labels):
idxs = np.where(self.img_labels == k)[0]
idxs = np.random.choice(idxs, int(np.ceil(len(idxs)*frac)), replace=False)
sampled_idxs.extend(idxs.tolist())
self.img_ids, self.img_files, self.img_labels = self.img_ids[sampled_idxs], self.img_files[sampled_idxs], self.img_labels[sampled_idxs]
# Adapt filenames to jpg
for i in range(self.img_files.size):
self.img_files[i] = os.path.splitext(self.img_files[i])[0] + ".jpg"
# Compute class frequency
if self.split != 'test':
self.class_freq = np.bincount(self.img_labels)
# assert self.class_freq.size == 5000
self.sample_weight = self.img_labels.size / (self.class_freq[self.img_labels] + 1e-6)
else:
self.class_freq = -1*np.ones(5000)
self.sample_weight = np.ones(self.img_labels.size, np.float)
_logger.info("Webvision {} dataset read with {} images".format(split, len(self.img_ids)))
def __getitem__(self, index):
img_id = self.img_ids[index]
label = self.img_labels[index]
img = Image.open(self.img_files[index])
if img.mode != 'RGB':
img = img.convert(mode='RGB')
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
return len(self.img_ids)
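# --- Editor's sketch (not from the source): the jigsaw branch in
# __getitem__ above encodes the patch shuffle as a flattened one-hot
# permutation matrix, so the applied order can be recovered with a
# per-row argmax.
def _demo_jigsaw_label(num_patches=9):
    perm = torch.randperm(num_patches)
    label = torch.eye(num_patches, num_patches)[perm].view(-1)
    recovered = label.view(num_patches, num_patches).argmax(dim=1)
    assert torch.equal(recovered, perm)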
class LMDBDataset(data.Dataset):
def __init__(self, lmdb_path, image_transform=None, jigsaw=False):
# read parameters
self.lmdb_path = lmdb_path
self.transform = image_transform
self.jigsaw = jigsaw
# open and read general info of the dataset
self.lmdb_env = lmdb.open(self.lmdb_path, max_readers=1, readonly=True, lock=False, readahead=False,
meminit=False)
with self.lmdb_env.begin() as lmdb_txn:
self.num_samples = np.fromstring(lmdb_txn.get('num_samples'.encode()), dtype=np.int)[0]
self.class_feq = np.fromstring(lmdb_txn.get('class_feq'.encode()), dtype=np.float)
self.sample_weight = np.fromstring(lmdb_txn.get('sample_weight'.encode()), dtype=np.float)
_logger.info("Webvision dataset loaded from LMDB {} with {} images".format(lmdb_path, self.num_samples))
def __getitem__(self, index):
id_key, img_key, lbl_key = "id_{:09d}".format(index), "img_{:09d}".format(index), "lbl_{:09d}".format(index)
with self.lmdb_env.begin() as lmdb_txn:
id_b, img_b, lbl_b = lmdb_txn.get(id_key.encode()), lmdb_txn.get(img_key.encode()), lmdb_txn.get(
lbl_key.encode())
img_id = id_b.decode()
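            # NOTE (editor): cv2.imdecode yields BGR channel order, but the array
            # is wrapped as mode='RGB' below, so R and B stay swapped unless
            # converted first.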
img = Image.fromarray(cv2.imdecode(np.fromstring(img_b, dtype=np.uint8), cv2.IMREAD_COLOR), mode='RGB')
label = np.fromstring(lbl_b, dtype=np.float).astype(np.long)[0]
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
|
class JigsawTransform:
def __init__(self, grid_size=3, patch_size=64):
self.grid_size = grid_size
self.patch_size = patch_size
def __call__(self, img):
w, h = img.size
crops = []
for c_i, c_j in itertools.product(range(self.grid_size), range(self.grid_size)):
# find patch coordinates
tile_h, tile_w = (h / self.grid_size), (w / self.grid_size)
pad_h, pad_w = (tile_h - self.patch_size) / 2.0, (tile_w - self.patch_size) / 2.0
l, u, r, b = c_j * tile_w + pad_w, c_i * tile_h + pad_h, (c_j + 1) * tile_w - pad_w, (
c_i + 1) * tile_h - pad_h
l, u, r, b = int(np.floor(l)), int(np.floor(u)), int(np.floor(r)), int(np.floor(b))
# jitter
l = l + random.randint(0, r - self.patch_size - l)
u = u + random.randint(0, b - self.patch_size - u)
r = l + self.patch_size
b = u + self.patch_size
# crop
crops.append(img.crop((l, u, r, b)))
return crops
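# --- Editor's sketch (not from the source): with the defaults used in
# get_datasets below (grid 3, patch 64), a 224x224 crop yields nine jittered
# 64x64 tiles, which the ToTensor+stack Lambda turns into a (9, 3, 64, 64)
# tensor per image.
def _demo_jigsaw_transform():
    tiles = JigsawTransform(grid_size=3, patch_size=64)(Image.new('RGB', (224, 224)))
    assert len(tiles) == 9 and tiles[0].size == (64, 64)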
def get_datasets(pre_train, is_lmdb, subset=None):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if pre_train:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), JigsawTransform(3, 64),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'val': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
else:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), transforms.ToTensor(), normalize]),
'val': transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize]),
# 'val': transforms.Compose([DenseCropTransform(), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
# 'val': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
if is_lmdb:
train_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/train/', jigsaw=pre_train, image_transform=image_transform['train'])
val_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/val/', jigsaw=pre_train, image_transform=image_transform['val'])
test_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/test/', jigsaw=pre_train, image_transform=image_transform['test'])
else:
db_info = wv_config.LoadInfo()
train_db = WebVision(db_info, 'train', jigsaw=pre_train, transform=image_transform['train'], subset=subset)
val_db = WebVision(db_info, 'val', jigsaw=pre_train, transform=image_transform['val'])
test_db = WebVision(db_info, 'test', jigsaw=pre_train, transform=image_transform['test'])
return train_db, val_db, test_db
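# --- Editor's sketch (not from the source): typical driver code. The
# class-balancing weights computed in the datasets feed a
# WeightedRandomSampler; batch size and worker count here are arbitrary.
def _demo_loader():
    train_db, val_db, test_db = get_datasets(pre_train=False, is_lmdb=False)
    sampler = data.WeightedRandomSampler(train_db.sample_weight, num_samples=len(train_db))
    return data.DataLoader(train_db, batch_size=64, sampler=sampler, num_workers=4)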
class DenseCropTransform:
def __init__(self, scales=(256, 288, 320, 352), crop_size=224):
self.scales = scales
self.crop_size = crop_size
def __call__(self, img):
crops = []
for scale in self.scales:
if min(img.size) != scale:
r_img = trans_func.resize(img, scale)
else:
r_img = img.copy()
w, h = r_img.size
square_crops_coord = [(0, 0, scale, scale),
(int(round((h - scale) / 2.)), int(round((w - scale) / 2.)), scale, scale),
(h-scale, w-scale, scale, scale)]
for upper, left, height, width in square_crops_coord:
square = trans_func.crop(r_img, upper, left, height, width)
sq_ten_crops = trans_func.ten_crop(square, self.crop_size)
sq_crop = trans_func.resize(square, self.crop_size)
sq_crop_mirror = trans_func.hflip(sq_crop)
crops.extend((sq_crop, sq_crop_mirror) + sq_ten_crops)
return crops
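# --- Editor's sketch (not from the source): with the four default scales,
# each scale contributes three square crops, and each square expands to
# 1 resized view + 1 mirror + 10 ten-crop views, i.e. 4 * 3 * 12 = 144
# crops of 224x224 per image.
def _demo_dense_crop():
    crops = DenseCropTransform()(Image.new('RGB', (640, 480)))
    assert len(crops) == 144 and crops[0].size == (224, 224)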
# import wvc_utils
# num_models = 5
# frac_samples = 0.25
# output_dir = '/home/rfsc/Projects/webvision_challenge/outputs/esemble/'
#
# db_info = wv_config.LoadInfo()
# img_ids = db_info[db_info.type == 'train'].image_id.values.astype(np.str)
# img_labels = db_info[db_info.type == 'train'].label.values.astype(np.long)
# sampler_dict = {label: wvc_utils.CycleIterator(img_ids[np.equal(img_labels, label)].tolist(), shuffle=True)
# for label in np.unique(img_labels)}
#
# # compute subsets
# for num in range(num_models):
# subset = []
# for label, it in sampler_dict.items():
# print("Computing model {}, label {}".format(num+1, label))
# num_samples = int(np.ceil(frac_samples * len(it._items)))
# samples = [next(it) for _ in range(num_samples)]
# subset.extend(samples)
# subset = np.array(subset, dtype=np.str)
# print("Model {} has found {} samples".format(num+1, len(subset)))
# np.save(os.path.join(output_dir, 'subset_{}'.format(num+1)), subset)
# files = ['/data/home/rfsc/wvc/outputs/baseline/sub_file_val_10crops_NEW.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/1/sub_file_val_10crops_new_e1.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/2/sub_file_val_10crops_new_e2.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/3/sub_file_val_10crops_new_e3.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/sub_val_esemble_base10crop.txt']
#
#
# with open(files[0], 'r') as p1, open(files[1], 'r') as p2, open(files[2], 'r') as p3, open(files[3], 'r') as p4, open(files[4], 'w') as r :
# for lines in zip(p1, p2, p3, p4):
# lines = np.stack([np.array(line.split('\t'), dtype=np.str) for line in lines])
# id = [lines[0, 0]]
# vals = lines[:, 1:].astype(np.float).mean(axis=0)
# vals = np.argsort(vals)[-5:][::-1].astype(np.str).tolist()
# r.write("{}\n".format("\t".join(id + vals)))
# probs = np.zeros((294099, 5000), dtype=np.float)
# ids = np.zeros((294099, 1), dtype=np.str)
# for file in files:
# b = np.loadtxt(file, dtype=np.str)
# ids = b[:, 0]
# probs += b[:, 1:].astype(np.float)
# probs = probs * 0.25
#
# sc = np.argsort(probs, axis=1)[:, -5:][:, ::-1].astype(np.str)
# sc = np.concatenate([ids, sc], axis=1).tolist()
# with open('/data/home/rfsc/wvc/outputs/sub_val_esemble.txt', 'w') as f:
# for line in sc:
# f.write("{}\n".format("\t".join(line)))
| return self.num_samples | identifier_body |
wvc_data.py | import torch
import torch.utils.data as data
from PIL import Image
import logging, os
import numpy as np
import lmdb
import cv2
from torchvision import transforms
import random
import itertools
from webvision import config as wv_config
from torchvision.transforms import functional as trans_func
_logger = logging.getLogger(__name__)
class WebVision(data.Dataset):
def __init__(self, db_info, split='train', transform=None, jigsaw=False, frac=None, subset=None):
# Load data
self.split = split
self.img_ids = db_info[db_info.type == split].image_id.values.astype(np.str)
self.img_files = db_info[db_info.type == split].image_path.values.astype(np.str)
self.img_labels = db_info[db_info.type == split].label.values.astype(np.long)
self.transform = transform
self.jigsaw = jigsaw
assert len(self.img_ids) == len(self.img_files)
assert len(self.img_ids) == len(self.img_labels)
if subset is not None:
subset_idx = np.isin(self.img_ids, subset)
self.img_ids = self.img_ids[subset_idx]
self.img_files = self.img_files[subset_idx]
self.img_labels = self.img_labels[subset_idx]
assert np.unique(self.img_labels).size == 5000
_logger.info("Selecting subset of {} images".format(len(subset)))
# compute fraction of the dataset
if frac is not None:
sampled_idxs, frac = [], 0.01
for k in np.unique(self.img_labels):
idxs = np.where(self.img_labels == k)[0]
idxs = np.random.choice(idxs, int(np.ceil(len(idxs)*frac)), replace=False)
sampled_idxs.extend(idxs.tolist())
self.img_ids, self.img_files, self.img_labels = self.img_ids[sampled_idxs], self.img_files[sampled_idxs], self.img_labels[sampled_idxs]
# Adapt filenames to jpg
for i in range(self.img_files.size):
self.img_files[i] = os.path.splitext(self.img_files[i])[0] + ".jpg"
# Compute class frequency
if self.split != 'test':
self.class_freq = np.bincount(self.img_labels) | self.class_freq = -1*np.ones(5000)
self.sample_weight = np.ones(self.img_labels.size, np.float)
_logger.info("Webvision {} dataset read with {} images".format(split, len(self.img_ids)))
def __getitem__(self, index):
img_id = self.img_ids[index]
label = self.img_labels[index]
img = Image.open(self.img_files[index])
if img.mode != 'RGB':
img = img.convert(mode='RGB')
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
return len(self.img_ids)
class LMDBDataset(data.Dataset):
def __init__(self, lmdb_path, image_transform=None, jigsaw=False):
# read parameters
self.lmdb_path = lmdb_path
self.transform = image_transform
self.jigsaw = jigsaw
# open and read general info of the dataset
self.lmdb_env = lmdb.open(self.lmdb_path, max_readers=1, readonly=True, lock=False, readahead=False,
meminit=False)
with self.lmdb_env.begin() as lmdb_txn:
self.num_samples = np.fromstring(lmdb_txn.get('num_samples'.encode()), dtype=np.int)[0]
self.class_feq = np.fromstring(lmdb_txn.get('class_feq'.encode()), dtype=np.float)
self.sample_weight = np.fromstring(lmdb_txn.get('sample_weight'.encode()), dtype=np.float)
_logger.info("Webvision dataset loaded from LMDB {} with {} images".format(lmdb_path, self.num_samples))
def __getitem__(self, index):
id_key, img_key, lbl_key = "id_{:09d}".format(index), "img_{:09d}".format(index), "lbl_{:09d}".format(index)
with self.lmdb_env.begin() as lmdb_txn:
id_b, img_b, lbl_b = lmdb_txn.get(id_key.encode()), lmdb_txn.get(img_key.encode()), lmdb_txn.get(
lbl_key.encode())
img_id = id_b.decode()
img = Image.fromarray(cv2.imdecode(np.fromstring(img_b, dtype=np.uint8), cv2.IMREAD_COLOR), mode='RGB')
label = np.fromstring(lbl_b, dtype=np.float).astype(np.long)[0]
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
return self.num_samples
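# --- Editor's sketch (not from the source): the writer side implied by the
# reads above. The key names mirror LMDBDataset.__getitem__/__init__; the
# helper itself is hypothetical and not part of this module.
def _demo_write_sample(lmdb_env, index, img_id, jpeg_bytes, label):
    with lmdb_env.begin(write=True) as txn:
        txn.put("id_{:09d}".format(index).encode(), img_id.encode())
        txn.put("img_{:09d}".format(index).encode(), jpeg_bytes)  # raw JPEG bytes
        txn.put("lbl_{:09d}".format(index).encode(),
                np.array([label], dtype=np.float).tobytes())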
class JigsawTransform:
def __init__(self, grid_size=3, patch_size=64):
self.grid_size = grid_size
self.patch_size = patch_size
def __call__(self, img):
w, h = img.size
crops = []
for c_i, c_j in itertools.product(range(self.grid_size), range(self.grid_size)):
# find patch coordinates
tile_h, tile_w = (h / self.grid_size), (w / self.grid_size)
pad_h, pad_w = (tile_h - self.patch_size) / 2.0, (tile_w - self.patch_size) / 2.0
l, u, r, b = c_j * tile_w + pad_w, c_i * tile_h + pad_h, (c_j + 1) * tile_w - pad_w, (
c_i + 1) * tile_h - pad_h
l, u, r, b = int(np.floor(l)), int(np.floor(u)), int(np.floor(r)), int(np.floor(b))
# jitter
l = l + random.randint(0, r - self.patch_size - l)
u = u + random.randint(0, b - self.patch_size - u)
r = l + self.patch_size
b = u + self.patch_size
# crop
crops.append(img.crop((l, u, r, b)))
return crops
def get_datasets(pre_train, is_lmdb, subset=None):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if pre_train:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), JigsawTransform(3, 64),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'val': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
else:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), transforms.ToTensor(), normalize]),
'val': transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize]),
# 'val': transforms.Compose([DenseCropTransform(), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
# 'val': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
if is_lmdb:
train_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/train/', jigsaw=pre_train, image_transform=image_transform['train'])
val_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/val/', jigsaw=pre_train, image_transform=image_transform['val'])
test_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/test/', jigsaw=pre_train, image_transform=image_transform['test'])
else:
db_info = wv_config.LoadInfo()
train_db = WebVision(db_info, 'train', jigsaw=pre_train, transform=image_transform['train'], subset=subset)
val_db = WebVision(db_info, 'val', jigsaw=pre_train, transform=image_transform['val'])
test_db = WebVision(db_info, 'test', jigsaw=pre_train, transform=image_transform['test'])
return train_db, val_db, test_db
class DenseCropTransform:
def __init__(self, scales=(256, 288, 320, 352), crop_size=224):
self.scales = scales
self.crop_size = crop_size
def __call__(self, img):
crops = []
for scale in self.scales:
if min(img.size) != scale:
r_img = trans_func.resize(img, scale)
else:
r_img = img.copy()
w, h = r_img.size
square_crops_coord = [(0, 0, scale, scale),
(int(round((h - scale) / 2.)), int(round((w - scale) / 2.)), scale, scale),
(h-scale, w-scale, scale, scale)]
for upper, left, height, width in square_crops_coord:
square = trans_func.crop(r_img, upper, left, height, width)
sq_ten_crops = trans_func.ten_crop(square, self.crop_size)
sq_crop = trans_func.resize(square, self.crop_size)
sq_crop_mirror = trans_func.hflip(sq_crop)
crops.extend((sq_crop, sq_crop_mirror) + sq_ten_crops)
return crops
# import wvc_utils
# num_models = 5
# frac_samples = 0.25
# output_dir = '/home/rfsc/Projects/webvision_challenge/outputs/esemble/'
#
# db_info = wv_config.LoadInfo()
# img_ids = db_info[db_info.type == 'train'].image_id.values.astype(np.str)
# img_labels = db_info[db_info.type == 'train'].label.values.astype(np.long)
# sampler_dict = {label: wvc_utils.CycleIterator(img_ids[np.equal(img_labels, label)].tolist(), shuffle=True)
# for label in np.unique(img_labels)}
#
# # compute subsets
# for num in range(num_models):
# subset = []
# for label, it in sampler_dict.items():
# print("Computing model {}, label {}".format(num+1, label))
# num_samples = int(np.ceil(frac_samples * len(it._items)))
# samples = [next(it) for _ in range(num_samples)]
# subset.extend(samples)
# subset = np.array(subset, dtype=np.str)
# print("Model {} has found {} samples".format(num+1, len(subset)))
# np.save(os.path.join(output_dir, 'subset_{}'.format(num+1)), subset)
# files = ['/data/home/rfsc/wvc/outputs/baseline/sub_file_val_10crops_NEW.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/1/sub_file_val_10crops_new_e1.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/2/sub_file_val_10crops_new_e2.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/3/sub_file_val_10crops_new_e3.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/sub_val_esemble_base10crop.txt']
#
#
# with open(files[0], 'r') as p1, open(files[1], 'r') as p2, open(files[2], 'r') as p3, open(files[3], 'r') as p4, open(files[4], 'w') as r :
# for lines in zip(p1, p2, p3, p4):
# lines = np.stack([np.array(line.split('\t'), dtype=np.str) for line in lines])
# id = [lines[0, 0]]
# vals = lines[:, 1:].astype(np.float).mean(axis=0)
# vals = np.argsort(vals)[-5:][::-1].astype(np.str).tolist()
# r.write("{}\n".format("\t".join(id + vals)))
# probs = np.zeros((294099, 5000), dtype=np.float)
# ids = np.zeros((294099, 1), dtype=np.str)
# for file in files:
# b = np.loadtxt(file, dtype=np.str)
# ids = b[:, 0]
# probs += b[:, 1:].astype(np.float)
# probs = probs * 0.25
#
# sc = np.argsort(probs, axis=1)[:, -5:][:, ::-1].astype(np.str)
# sc = np.concatenate([ids, sc], axis=1).tolist()
# with open('/data/home/rfsc/wvc/outputs/sub_val_esemble.txt', 'w') as f:
# for line in sc:
# f.write("{}\n".format("\t".join(line))) | # assert self.class_freq.size == 5000
self.sample_weight = self.img_labels.size / (self.class_freq[self.img_labels] + 1e-6)
else: | random_line_split |
wvc_data.py | import torch
import torch.utils.data as data
from PIL import Image
import logging, os
import numpy as np
import lmdb
import cv2
from torchvision import transforms
import random
import itertools
from webvision import config as wv_config
from torchvision.transforms import functional as trans_func
_logger = logging.getLogger(__name__)
class WebVision(data.Dataset):
def __init__(self, db_info, split='train', transform=None, jigsaw=False, frac=None, subset=None):
# Load data
self.split = split
self.img_ids = db_info[db_info.type == split].image_id.values.astype(np.str)
self.img_files = db_info[db_info.type == split].image_path.values.astype(np.str)
self.img_labels = db_info[db_info.type == split].label.values.astype(np.long)
self.transform = transform
self.jigsaw = jigsaw
assert len(self.img_ids) == len(self.img_files)
assert len(self.img_ids) == len(self.img_labels)
if subset is not None:
subset_idx = np.isin(self.img_ids, subset)
self.img_ids = self.img_ids[subset_idx]
self.img_files = self.img_files[subset_idx]
self.img_labels = self.img_labels[subset_idx]
assert np.unique(self.img_labels).size == 5000
_logger.info("Selecting subset of {} images".format(len(subset)))
# compute fraction of the dataset
if frac is not None:
sampled_idxs, frac = [], 0.01
for k in np.unique(self.img_labels):
idxs = np.where(self.img_labels == k)[0]
idxs = np.random.choice(idxs, int(np.ceil(len(idxs)*frac)), replace=False)
sampled_idxs.extend(idxs.tolist())
self.img_ids, self.img_files, self.img_labels = self.img_ids[sampled_idxs], self.img_files[sampled_idxs], self.img_labels[sampled_idxs]
# Adapt filenames to jpg
for i in range(self.img_files.size):
self.img_files[i] = os.path.splitext(self.img_files[i])[0] + ".jpg"
# Compute class frequency
if self.split != 'test':
self.class_freq = np.bincount(self.img_labels)
# assert self.class_freq.size == 5000
self.sample_weight = self.img_labels.size / (self.class_freq[self.img_labels] + 1e-6)
else:
self.class_freq = -1*np.ones(5000)
self.sample_weight = np.ones(self.img_labels.size, np.float)
_logger.info("Webvision {} dataset read with {} images".format(split, len(self.img_ids)))
def __getitem__(self, index):
img_id = self.img_ids[index]
label = self.img_labels[index]
img = Image.open(self.img_files[index])
if img.mode != 'RGB':
img = img.convert(mode='RGB')
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
return len(self.img_ids)
class LMDBDataset(data.Dataset):
def __init__(self, lmdb_path, image_transform=None, jigsaw=False):
# read parameters
self.lmdb_path = lmdb_path
self.transform = image_transform
self.jigsaw = jigsaw
# open and read general info of the dataset
self.lmdb_env = lmdb.open(self.lmdb_path, max_readers=1, readonly=True, lock=False, readahead=False,
meminit=False)
with self.lmdb_env.begin() as lmdb_txn:
self.num_samples = np.fromstring(lmdb_txn.get('num_samples'.encode()), dtype=np.int)[0]
self.class_feq = np.fromstring(lmdb_txn.get('class_feq'.encode()), dtype=np.float)
self.sample_weight = np.fromstring(lmdb_txn.get('sample_weight'.encode()), dtype=np.float)
_logger.info("Webvision dataset loaded from LMDB {} with {} images".format(lmdb_path, self.num_samples))
def __getitem__(self, index):
id_key, img_key, lbl_key = "id_{:09d}".format(index), "img_{:09d}".format(index), "lbl_{:09d}".format(index)
with self.lmdb_env.begin() as lmdb_txn:
id_b, img_b, lbl_b = lmdb_txn.get(id_key.encode()), lmdb_txn.get(img_key.encode()), lmdb_txn.get(
lbl_key.encode())
img_id = id_b.decode()
img = Image.fromarray(cv2.imdecode(np.fromstring(img_b, dtype=np.uint8), cv2.IMREAD_COLOR), mode='RGB')
label = np.fromstring(lbl_b, dtype=np.float).astype(np.long)[0]
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
return self.num_samples
class JigsawTransform:
def | (self, grid_size=3, patch_size=64):
self.grid_size = grid_size
self.patch_size = patch_size
def __call__(self, img):
w, h = img.size
crops = []
for c_i, c_j in itertools.product(range(self.grid_size), range(self.grid_size)):
# find patch coordinates
tile_h, tile_w = (h / self.grid_size), (w / self.grid_size)
pad_h, pad_w = (tile_h - self.patch_size) / 2.0, (tile_w - self.patch_size) / 2.0
l, u, r, b = c_j * tile_w + pad_w, c_i * tile_h + pad_h, (c_j + 1) * tile_w - pad_w, (
c_i + 1) * tile_h - pad_h
l, u, r, b = int(np.floor(l)), int(np.floor(u)), int(np.floor(r)), int(np.floor(b))
# jitter
l = l + random.randint(0, r - self.patch_size - l)
u = u + random.randint(0, b - self.patch_size - u)
r = l + self.patch_size
b = u + self.patch_size
# crop
crops.append(img.crop((l, u, r, b)))
return crops
def get_datasets(pre_train, is_lmdb, subset=None):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if pre_train:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), JigsawTransform(3, 64),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'val': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
else:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), transforms.ToTensor(), normalize]),
'val': transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize]),
# 'val': transforms.Compose([DenseCropTransform(), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
# 'val': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
if is_lmdb:
train_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/train/', jigsaw=pre_train, image_transform=image_transform['train'])
val_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/val/', jigsaw=pre_train, image_transform=image_transform['val'])
test_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/test/', jigsaw=pre_train, image_transform=image_transform['test'])
else:
db_info = wv_config.LoadInfo()
train_db = WebVision(db_info, 'train', jigsaw=pre_train, transform=image_transform['train'], subset=subset)
val_db = WebVision(db_info, 'val', jigsaw=pre_train, transform=image_transform['val'])
test_db = WebVision(db_info, 'test', jigsaw=pre_train, transform=image_transform['test'])
return train_db, val_db, test_db
class DenseCropTransform:
def __init__(self, scales=(256, 288, 320, 352), crop_size=224):
self.scales = scales
self.crop_size = crop_size
def __call__(self, img):
crops = []
for scale in self.scales:
if min(img.size) != scale:
r_img = trans_func.resize(img, scale)
else:
r_img = img.copy()
w, h = r_img.size
square_crops_coord = [(0, 0, scale, scale),
(int(round((h - scale) / 2.)), int(round((w - scale) / 2.)), scale, scale),
(h-scale, w-scale, scale, scale)]
for upper, left, height, width in square_crops_coord:
square = trans_func.crop(r_img, upper, left, height, width)
sq_ten_crops = trans_func.ten_crop(square, self.crop_size)
sq_crop = trans_func.resize(square, self.crop_size)
sq_crop_mirror = trans_func.hflip(sq_crop)
crops.extend((sq_crop, sq_crop_mirror) + sq_ten_crops)
return crops
# import wvc_utils
# num_models = 5
# frac_samples = 0.25
# output_dir = '/home/rfsc/Projects/webvision_challenge/outputs/esemble/'
#
# db_info = wv_config.LoadInfo()
# img_ids = db_info[db_info.type == 'train'].image_id.values.astype(np.str)
# img_labels = db_info[db_info.type == 'train'].label.values.astype(np.long)
# sampler_dict = {label: wvc_utils.CycleIterator(img_ids[np.equal(img_labels, label)].tolist(), shuffle=True)
# for label in np.unique(img_labels)}
#
# # compute subsets
# for num in range(num_models):
# subset = []
# for label, it in sampler_dict.items():
# print("Computing model {}, label {}".format(num+1, label))
# num_samples = int(np.ceil(frac_samples * len(it._items)))
# samples = [next(it) for _ in range(num_samples)]
# subset.extend(samples)
# subset = np.array(subset, dtype=np.str)
# print("Model {} has found {} samples".format(num+1, len(subset)))
# np.save(os.path.join(output_dir, 'subset_{}'.format(num+1)), subset)
# files = ['/data/home/rfsc/wvc/outputs/baseline/sub_file_val_10crops_NEW.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/1/sub_file_val_10crops_new_e1.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/2/sub_file_val_10crops_new_e2.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/3/sub_file_val_10crops_new_e3.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/sub_val_esemble_base10crop.txt']
#
#
# with open(files[0], 'r') as p1, open(files[1], 'r') as p2, open(files[2], 'r') as p3, open(files[3], 'r') as p4, open(files[4], 'w') as r :
# for lines in zip(p1, p2, p3, p4):
# lines = np.stack([np.array(line.split('\t'), dtype=np.str) for line in lines])
# id = [lines[0, 0]]
# vals = lines[:, 1:].astype(np.float).mean(axis=0)
# vals = np.argsort(vals)[-5:][::-1].astype(np.str).tolist()
# r.write("{}\n".format("\t".join(id + vals)))
# probs = np.zeros((294099, 5000), dtype=np.float)
# ids = np.zeros((294099, 1), dtype=np.str)
# for file in files:
# b = np.loadtxt(file, dtype=np.str)
# ids = b[:, 0]
# probs += b[:, 1:].astype(np.float)
# probs = probs * 0.25
#
# sc = np.argsort(probs, axis=1)[:, -5:][:, ::-1].astype(np.str)
# sc = np.concatenate([ids, sc], axis=1).tolist()
# with open('/data/home/rfsc/wvc/outputs/sub_val_esemble.txt', 'w') as f:
# for line in sc:
# f.write("{}\n".format("\t".join(line)))
| __init__ | identifier_name |
wvc_data.py | import torch
import torch.utils.data as data
from PIL import Image
import logging, os
import numpy as np
import lmdb
import cv2
from torchvision import transforms
import random
import itertools
from webvision import config as wv_config
from torchvision.transforms import functional as trans_func
_logger = logging.getLogger(__name__)
class WebVision(data.Dataset):
def __init__(self, db_info, split='train', transform=None, jigsaw=False, frac=None, subset=None):
# Load data
self.split = split
self.img_ids = db_info[db_info.type == split].image_id.values.astype(np.str)
self.img_files = db_info[db_info.type == split].image_path.values.astype(np.str)
self.img_labels = db_info[db_info.type == split].label.values.astype(np.long)
self.transform = transform
self.jigsaw = jigsaw
assert len(self.img_ids) == len(self.img_files)
assert len(self.img_ids) == len(self.img_labels)
if subset is not None:
subset_idx = np.isin(self.img_ids, subset)
self.img_ids = self.img_ids[subset_idx]
self.img_files = self.img_files[subset_idx]
self.img_labels = self.img_labels[subset_idx]
assert np.unique(self.img_labels).size == 5000
_logger.info("Selecting subset of {} images".format(len(subset)))
# compute fraction of the dataset
if frac is not None:
sampled_idxs, frac = [], 0.01
for k in np.unique(self.img_labels):
idxs = np.where(self.img_labels == k)[0]
idxs = np.random.choice(idxs, int(np.ceil(len(idxs)*frac)), replace=False)
sampled_idxs.extend(idxs.tolist())
self.img_ids, self.img_files, self.img_labels = self.img_ids[sampled_idxs], self.img_files[sampled_idxs], self.img_labels[sampled_idxs]
# Adapt filenames to jpg
for i in range(self.img_files.size):
self.img_files[i] = os.path.splitext(self.img_files[i])[0] + ".jpg"
# Compute class frequency
if self.split != 'test':
|
else:
self.class_freq = -1*np.ones(5000)
self.sample_weight = np.ones(self.img_labels.size, np.float)
_logger.info("Webvision {} dataset read with {} images".format(split, len(self.img_ids)))
def __getitem__(self, index):
img_id = self.img_ids[index]
label = self.img_labels[index]
img = Image.open(self.img_files[index])
if img.mode != 'RGB':
img = img.convert(mode='RGB')
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
return len(self.img_ids)
class LMDBDataset(data.Dataset):
def __init__(self, lmdb_path, image_transform=None, jigsaw=False):
# read parameters
self.lmdb_path = lmdb_path
self.transform = image_transform
self.jigsaw = jigsaw
# open and read general info of the dataset
self.lmdb_env = lmdb.open(self.lmdb_path, max_readers=1, readonly=True, lock=False, readahead=False,
meminit=False)
with self.lmdb_env.begin() as lmdb_txn:
self.num_samples = np.fromstring(lmdb_txn.get('num_samples'.encode()), dtype=np.int)[0]
self.class_feq = np.fromstring(lmdb_txn.get('class_feq'.encode()), dtype=np.float)
self.sample_weight = np.fromstring(lmdb_txn.get('sample_weight'.encode()), dtype=np.float)
_logger.info("Webvision dataset loaded from LMDB {} with {} images".format(lmdb_path, self.num_samples))
def __getitem__(self, index):
id_key, img_key, lbl_key = "id_{:09d}".format(index), "img_{:09d}".format(index), "lbl_{:09d}".format(index)
with self.lmdb_env.begin() as lmdb_txn:
id_b, img_b, lbl_b = lmdb_txn.get(id_key.encode()), lmdb_txn.get(img_key.encode()), lmdb_txn.get(
lbl_key.encode())
img_id = id_b.decode()
img = Image.fromarray(cv2.imdecode(np.fromstring(img_b, dtype=np.uint8), cv2.IMREAD_COLOR), mode='RGB')
label = np.fromstring(lbl_b, dtype=np.float).astype(np.long)[0]
if self.transform is not None:
img = self.transform(img)
if self.jigsaw:
assert img.dim() > 3
label = torch.randperm(img.size(0)).long()
img = img[label]
label = torch.eye(img.size(0), img.size(0))[label]
label = label.view(-1)
return img_id, img, label
def __len__(self):
return self.num_samples
class JigsawTransform:
def __init__(self, grid_size=3, patch_size=64):
self.grid_size = grid_size
self.patch_size = patch_size
def __call__(self, img):
w, h = img.size
crops = []
for c_i, c_j in itertools.product(range(self.grid_size), range(self.grid_size)):
# find patch coordinates
tile_h, tile_w = (h / self.grid_size), (w / self.grid_size)
pad_h, pad_w = (tile_h - self.patch_size) / 2.0, (tile_w - self.patch_size) / 2.0
l, u, r, b = c_j * tile_w + pad_w, c_i * tile_h + pad_h, (c_j + 1) * tile_w - pad_w, (
c_i + 1) * tile_h - pad_h
l, u, r, b = int(np.floor(l)), int(np.floor(u)), int(np.floor(r)), int(np.floor(b))
# jitter
l = l + random.randint(0, r - self.patch_size - l)
u = u + random.randint(0, b - self.patch_size - u)
r = l + self.patch_size
b = u + self.patch_size
# crop
crops.append(img.crop((l, u, r, b)))
return crops
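# Illustrative check of JigsawTransform (a sketch, not part of the original
# pipeline; the 256x256 input size below is an assumption): with grid_size=3
# and patch_size=64 it returns a list of 9 jittered 64x64 PIL crops.
# from PIL import Image
# tiles = JigsawTransform(grid_size=3, patch_size=64)(Image.new('RGB', (256, 256)))
# assert len(tiles) == 9 and all(t.size == (64, 64) for t in tiles)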
def get_datasets(pre_train, is_lmdb, subset=None):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if pre_train:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), JigsawTransform(3, 64),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'val': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.CenterCrop(224),
JigsawTransform(3, 64), transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
else:
image_transform = {'train': transforms.Compose([transforms.RandomCrop(224), transforms.ToTensor(), normalize]),
'val': transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize]),
# 'val': transforms.Compose([DenseCropTransform(), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
# 'val': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
# lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))]),
'test': transforms.Compose([transforms.TenCrop(224), transforms.Lambda(
lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])
}
if is_lmdb:
train_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/train/', jigsaw=pre_train, image_transform=image_transform['train'])
val_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/val/', jigsaw=pre_train, image_transform=image_transform['val'])
test_db = LMDBDataset('/data/home/rfsc/wvc/lmdb/test/', jigsaw=pre_train, image_transform=image_transform['test'])
else:
db_info = wv_config.LoadInfo()
train_db = WebVision(db_info, 'train', jigsaw=pre_train, transform=image_transform['train'], subset=subset)
val_db = WebVision(db_info, 'val', jigsaw=pre_train, transform=image_transform['val'])
test_db = WebVision(db_info, 'test', jigsaw=pre_train, transform=image_transform['test'])
return train_db, val_db, test_db
class DenseCropTransform:
def __init__(self, scales=(256, 288, 320, 352), crop_size=224):
self.scales = scales
self.crop_size = crop_size
def __call__(self, img):
crops = []
for scale in self.scales:
if min(img.size) != scale:
r_img = trans_func.resize(img, scale)
else:
r_img = img.copy()
w, h = r_img.size
square_crops_coord = [(0, 0, scale, scale),
(int(round((h - scale) / 2.)), int(round((w - scale) / 2.)), scale, scale),
(h-scale, w-scale, scale, scale)]
for upper, left, height, width in square_crops_coord:
square = trans_func.crop(r_img, upper, left, height, width)
sq_ten_crops = trans_func.ten_crop(square, self.crop_size)
sq_crop = trans_func.resize(square, self.crop_size)
sq_crop_mirror = trans_func.hflip(sq_crop)
crops.extend((sq_crop, sq_crop_mirror) + sq_ten_crops)
return crops
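# Crop count for DenseCropTransform with the default arguments (a worked
# check, assuming a large enough input image): 4 scales x 3 squares per
# scale x (10 ten-crop views + the resized square + its mirror), i.e.
# 4 * 3 * 12 = 144 crops per image.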
# import wvc_utils
# num_models = 5
# frac_samples = 0.25
# output_dir = '/home/rfsc/Projects/webvision_challenge/outputs/esemble/'
#
# db_info = wv_config.LoadInfo()
# img_ids = db_info[db_info.type == 'train'].image_id.values.astype(np.str)
# img_labels = db_info[db_info.type == 'train'].label.values.astype(np.long)
# sampler_dict = {label: wvc_utils.CycleIterator(img_ids[np.equal(img_labels, label)].tolist(), shuffle=True)
# for label in np.unique(img_labels)}
#
# # compute subsets
# for num in range(num_models):
# subset = []
# for label, it in sampler_dict.items():
# print("Computing model {}, label {}".format(num+1, label))
# num_samples = int(np.ceil(frac_samples * len(it._items)))
# samples = [next(it) for _ in range(num_samples)]
# subset.extend(samples)
# subset = np.array(subset, dtype=np.str)
# print("Model {} has found {} samples".format(num+1, len(subset)))
# np.save(os.path.join(output_dir, 'subset_{}'.format(num+1)), subset)
# files = ['/data/home/rfsc/wvc/outputs/baseline/sub_file_val_10crops_NEW.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/1/sub_file_val_10crops_new_e1.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/2/sub_file_val_10crops_new_e2.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/esemble/3/sub_file_val_10crops_new_e3.txt.prob.txt',
# '/data/home/rfsc/wvc/outputs/sub_val_esemble_base10crop.txt']
#
#
# with open(files[0], 'r') as p1, open(files[1], 'r') as p2, open(files[2], 'r') as p3, open(files[3], 'r') as p4, open(files[4], 'w') as r :
# for lines in zip(p1, p2, p3, p4):
# lines = np.stack([np.array(line.split('\t'), dtype=np.str) for line in lines])
# id = [lines[0, 0]]
# vals = lines[:, 1:].astype(np.float).mean(axis=0)
# vals = np.argsort(vals)[-5:][::-1].astype(np.str).tolist()
# r.write("{}\n".format("\t".join(id + vals)))
# probs = np.zeros((294099, 5000), dtype=np.float)
# ids = np.zeros((294099, 1), dtype=np.str)
# for file in files:
# b = np.loadtxt(file, dtype=np.str)
# ids = b[:, 0]
# probs += b[:, 1:].astype(np.float)
# probs = probs * 0.25
#
# sc = np.argsort(probs, axis=1)[:, -5:][:, ::-1].astype(np.str)
# sc = np.concatenate([ids, sc], axis=1).tolist()
# with open('/data/home/rfsc/wvc/outputs/sub_val_esemble.txt', 'w') as f:
# for line in sc:
# f.write("{}\n".format("\t".join(line)))
| self.class_freq = np.bincount(self.img_labels)
# assert self.class_freq.size == 5000
self.sample_weight = self.img_labels.size / (self.class_freq[self.img_labels] + 1e-6) | conditional_block |
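# Sketch of the jigsaw target encoding used in __getitem__ above (toy values,
# for illustration only): the tile permutation is one-hot encoded row by row
# and flattened, so the permutation can be recovered with a row-wise argmax.
# import torch
# perm = torch.randperm(9).long()             # order the 9 tiles are shuffled into
# target = torch.eye(9, 9)[perm].view(-1)     # flattened 9x9 one-hot, length 81
# assert torch.equal(target.view(9, 9).argmax(dim=1), perm)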
eeglab2hadoop.py | #!/oasis/scratch/csd181/mdburns/python/bin/python
# Copyright (C) 2012 Matthew Burns <mdburns@ucsd.edu>
from datetime import datetime
import helpers
import helpers.float_open as fop
from scipy import stats, zeros, ones, signal
from scipy.io import loadmat, savemat
import numpy as np
from numpy import array
import os
import sys
import pickle
import argparse
import base64
import gc
import multiprocessing as mp
from hadoop.io import SequenceFile, Text
NUMFOLDS = 5
NUM_SAMPLES = 0
SAMPLE_RATE = 0
NUM_EVENTS = 0
NUM_POINTS = 0
pool=None
def get_eeg(path, file_name):
print 'get_eeg: reading EEGLAB .set file '+ file_name
fullpath = path+file_name
try:
f = loadmat(fullpath+'.set', appendmat=False)
except:
print >> sys.stderr, 'get_eeg: could not load '+ file_name + '.set'
return 1
EEG = f['EEG']
events = {}
eeg = {}
label = []
latencyInFrame=[]
uniqueLabel=[]
event = EEG['event'][0][0][0]
gc.disable()
for t_event in event:
this_latency = str(t_event[0][0][0])
this_label = str(t_event[1][0][0])
latencyInFrame.append(this_latency)
label.append(this_label)
if this_label not in uniqueLabel:
uniqueLabel.append(this_label)
gc.enable()
uniqueLabel=[int(x) for x in uniqueLabel]
events['uniqueLabel'] = [str(x) for x in sorted(uniqueLabel)]
#-1 for Matlab indexing conversion
events['latencyInFrame'] = [(int(x)-1) for x in latencyInFrame]
events['label'] = label
eeg['events']=events
eeg['num_events']=len(events['label'])
eeg['sample_rate']=EEG['srate'][0][0][0][0]
eeg['num_samples']=EEG['pnts'][0][0][0][0]
eeg['num_channels']=EEG['nbchan'][0][0][0][0]
eeg['trials']=EEG['trials'][0][0][0][0]
eeg['ica_weights']=EEG['icaweights'][0][0]
eeg['ica_sphere']=EEG['icasphere'][0][0]
eeg['ica_winv']=EEG['icawinv'][0][0]
eeg['file_name']=file_name
eeg['path']=path
eeg['channel_locations']=EEG['chanlocs'][0][0]
eeg['prior_data_path']=EEG['data'][0][0][0]
return eeg
def find_artifact_indexes(eeg, data):
windowTimeLength = 200  # in ms
windowFrameLength = int(round(eeg['sample_rate'] * windowTimeLength / 1000))
coefs = ones((windowFrameLength,))
threshold = 2.1
args=[data[:,i] for i in np.arange(data.shape[1])]
result = pool.map(tied_rank, args)
tdrnk = array(result)/data.shape[0]
twosidep = np.minimum(tdrnk, 1-tdrnk)
log_likelihood = -np.log(twosidep)
meanLogLikelihood = np.mean(np.transpose(log_likelihood), 1)
windowFrame = np.arange((int(round(-windowFrameLength/2))),int(round((windowFrameLength/2)))).reshape((1,-1))
meanLogLikelihood = np.nan_to_num(meanLogLikelihood)
meanLogLikelihood[meanLogLikelihood > 1e20]=1e20
smoothMeanLogLikelihood = signal.filtfilt(coefs, array([1]), meanLogLikelihood)/(np.power(windowFrameLength,2))
isArtifactWindowCenter = np.where(smoothMeanLogLikelihood > threshold)[0].reshape((-1,1))
print 'clean indexes: number of artifact frames detected = %d' % len(isArtifactWindowCenter)
artifactFrames = np.tile(windowFrame, (isArtifactWindowCenter.shape[0], 1)) + np.tile(isArtifactWindowCenter, (1 , windowFrame.shape[0]))
artifactFrames = np.maximum(artifactFrames, 1)
artifactFrames = np.minimum(artifactFrames, meanLogLikelihood.shape[0])
artifactFrames = np.unique(artifactFrames[:])-1
return artifactFrames
def tied_rank(x):
"""
from: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/auc.py
Computes the tied rank of elements in x.
Parameters
----------
x : list of numbers, numpy array
Returns
-------
score : list of numbers
The tied rank of each element in x
"""
sorted_x = sorted(zip(x,range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank+1+i)/2.0
last_rank = i
if i==len(sorted_x)-1:
for j in range(last_rank, i+1):
r[sorted_x[j][1]] = float(last_rank+i+2)/2.0
return r
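# Worked example (illustration only): tied values share the average of the
# ranks they occupy, and ranks are 1-based.
# tied_rank([1, 2, 2, 3]) == [1.0, 2.5, 2.5, 4.0]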
"""
compile_data: input_str is the path to your input files, with the character '?' inserted where you want to specify different values.
For instance: 'X:\RSVP\exp?\realtime\exp?_continuous_with_ica' with substitute = range(44,61) will process files
'X:\RSVP\exp44\realtime\exp44_continuous_with_ica.set' ... all the way to 60. The sequence files will be created in the outputpath and
automatically uploaded to hdfs_target_path in the HDFS file system if those are specified. Assumes you are running this on the head node.
"""
def compile_data(input_str, substitute, outputpath='', compression=False, test_file=False, p=None):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
if p is not None:
global pool
pool = p
ica_key, ica_val, raw_key, raw_val = Text(), Text(), Text(), Text()
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
eeg = get_eeg(path_to_data + os.sep, filename)
if eeg != 1:
raw_data, ica_act = read_full_float(eeg)
else:
|
if raw_data is None:
continue
print(filename + ': identifying outliers')
artifact_indexes = find_artifact_indexes(eeg, ica_act)
eeg['artifact_indexes'] = artifact_indexes;
f = open('..' + os.sep + 'artifact_indexes', 'wb')  # binary mode for pickle
pickle.dump(artifact_indexes, f)
f.close()
eegstr = pickle.dumps(eeg, protocol=2)
print(filename + ': compiling dataset into hadoop sequence file')
if outputpath == '':
outputpath = path_to_data
#Enable compression if requested
if compression:
comp_type=SequenceFile.CompressionType.RECORD
else:
comp_type=SequenceFile.CompressionType.NONE
writer = SequenceFile.createWriter(outputpath + os.sep + filename + '.seq', Text, Text, compression_type=comp_type)
for i in range(raw_data.shape[1]):
if test_file and i > 3:
break
this_raw = np.ascontiguousarray(raw_data[:,i], dtype=raw_data.dtype)
this_ica = np.ascontiguousarray(ica_act[:,i], dtype=ica_act.dtype)
ica_key.set(outputpath + os.sep + filename + '.ica.' + str(i+1))
raw_key.set(outputpath + os.sep + filename + '.raw.' + str(i+1))
ica_temp = pickle.dumps((this_ica, eegstr), protocol=2)
raw_temp = pickle.dumps((this_raw, eegstr), protocol=2)
ica = base64.b64encode(ica_temp)
raw = base64.b64encode(raw_temp)
ica_val.set(ica)
raw_val.set(raw)
writer.append(raw_key, raw_val)
writer.append(ica_key, ica_val)
print(filename + ': '+str(i+1))
writer.close()
print filename + ': finished writing file'
return 0
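# Hypothetical invocation mirroring the docstring above (the paths and pool
# size are placeholders, not values from the original project):
# compile_data('X:\\RSVP\\exp?\\realtime\\exp?_continuous_with_ica',
#              range(44, 61), outputpath='X:\\RSVP\\hadoop',
#              compression=True, p=mp.Pool(4))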
def read_full_float(eeg):
print(eeg['file_name'] + ': reading full float file')
fn = eeg['path'] + eeg['file_name'] + '.fdt'
try:
f = fop.fopen(fn, 'r', 'l')
except:
print eeg['file_name']+': could not open ' + fn
return None, None
raw_data = f.read((eeg['num_samples'], eeg['num_channels']), 'float32')
f.close();
#Recompute ICA activations
print (eeg['file_name'] + ': recomputing ICA activations')
ica_act= np.transpose(np.float32(np.dot(np.dot(eeg['ica_weights'], eeg['ica_sphere']), np.transpose(raw_data))))
return raw_data, ica_act
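# The recomputation above is the usual ICA unmixing step: with weights W
# (components x channels), sphering matrix S (channels x channels) and data
# X (frames x channels), the activations are (W . S . X^T)^T. A toy shape
# check (made-up matrices, for illustration only):
# W, S = np.random.randn(4, 4), np.eye(4)
# X = np.random.randn(100, 4)
# act = np.transpose(np.float32(np.dot(np.dot(W, S), np.transpose(X))))
# assert act.shape == (100, 4)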
def create_file_manifest(input_str, substitute, outputpath=''):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
f = open(outputpath + os.sep + 'manifest.txt', 'w')
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
# The original loop body recorded nothing; writing one data location per
# line is an assumed completion of the intended manifest format.
f.write(path_to_data + os.sep + filename + '\n')
f.close()
def hadoop2mat(directory):
result={}
for fl in os.listdir(directory):
if fl.split('-')[0]=='part':
current = os.path.join(directory, fl)
print current
if os.path.isfile(current):
f = open(current, 'rb')
result_str = f.read().strip('\n')
f.close()
if not result_str=='':
experiments = result_str.split('\n')
kvps = [exp.split('\t') for exp in experiments]
for kvp in kvps:
this_result = pickle.loads(base64.b64decode(kvp[1]))
path, name = kvp[0].rsplit('/', 1)
print name
result[name]=this_result
savemat(directory+os.sep+'result.mat', result)
def main():
parser = argparse.ArgumentParser(description='Recompile EEGLAB files into sequence files for Hadoop')
parser.add_argument('file_str', type=str)
parser.add_argument('range', type=int, nargs=2)
parser.add_argument('outputpath', type=str)
parser.add_argument('--hdfs_target_path', type=str,default='', dest='hdfs_path')
parser.add_argument('--compression', help='compression on',action='store_true')
parser.add_argument('--manifest', help='compile the output as a list of file locations',action='store_true')
parser.add_argument('--sequencefile', help='compile the output as a hadoop sequencefile for use with hdfs',action='store_true')
parser.add_argument('--testfile', help='compile a small sequencefile for testing (~10 channels)',action='store_true')
parser.add_argument('--hadoop2mat', help='collect hadoop output files into a single .mat file',action='store_true')
#b= ['X:\RSVP\exp?\\realtime\exp?_continuous_with_ica','54' ,'54', 'X:\RSVP\hadoop\\']
theseargs = parser.parse_args()
if theseargs.range[0] == theseargs.range[1]:
trange = [theseargs.range[0]]
else:
trange = range(theseargs.range[0] ,theseargs.range[1]+1)
global pool
pool = mp.Pool(mp.cpu_count()-1)
ts = datetime.now()
#Creates full sequencefiles
if theseargs.sequencefile:
print 'eeglab2hadoop: creating sequence file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression)
#Creates list of file locations to bypass hdfs
if theseargs.manifest:
print 'eeglab2hadoop: creating manifest'
create_file_manifest(theseargs.file_str, trange, theseargs.outputpath)
#Creates small sequencefile for testing purposes
if theseargs.testfile:
print 'eeglab2hadoop: creating test file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression, test_file=True)
#Collects the part files produced by hadoop into a single result.mat for Matlab
if theseargs.hadoop2mat:
print 'eeglab2hadoop: consolidating hadoop parts into result.mat'
hadoop2mat(theseargs.file_str)
c=datetime.now()-ts
print ' '
print 'eeglab2hadoop: Completed processing in ' + str(c.seconds) + ' seconds'
pool.close()
pool.join()
return 0
if __name__ == "__main__":
mp.freeze_support()
main() | continue | conditional_block |
eeglab2hadoop.py | #!/oasis/scratch/csd181/mdburns/python/bin/python
# Copyright (C) 2012 Matthew Burns <mdburns@ucsd.edu>
from datetime import datetime
import helpers
import helpers.float_open as fop
from scipy import stats, zeros, ones, signal
from scipy.io import loadmat, savemat
import numpy as np
from numpy import array
import os
import sys
import pickle
import argparse
import base64
import gc
import multiprocessing as mp
from hadoop.io import SequenceFile, Text
NUMFOLDS = 5
NUM_SAMPLES = 0
SAMPLE_RATE = 0
NUM_EVENTS = 0
NUM_POINTS = 0
pool=None
def get_eeg(path, file_name):
print 'get_eeg: reading EEGLAB .set file '+ file_name
fullpath = path+file_name
try:
f = loadmat(fullpath+'.set', appendmat=False)
except:
print >> sys.stderr, 'get_eeg: could not load '+ file_name + '.set'
return 1
EEG = f['EEG']
events = {}
eeg = {}
label = []
latencyInFrame=[]
uniqueLabel=[]
event = EEG['event'][0][0][0]
gc.disable()
for t_event in event:
this_latency = str(t_event[0][0][0])
this_label = str(t_event[1][0][0])
latencyInFrame.append(this_latency)
label.append(this_label)
if this_label not in uniqueLabel:
uniqueLabel.append(this_label)
gc.enable()
uniqueLabel=[int(x) for x in uniqueLabel]
events['uniqueLabel'] = [str(x) for x in sorted(uniqueLabel)]
#-1 for Matlab indexing conversion
events['latencyInFrame'] = [(int(x)-1) for x in latencyInFrame]
events['label'] = label
eeg['events']=events
eeg['num_events']=len(events['label'])
eeg['sample_rate']=EEG['srate'][0][0][0][0]
eeg['num_samples']=EEG['pnts'][0][0][0][0]
eeg['num_channels']=EEG['nbchan'][0][0][0][0]
eeg['trials']=EEG['trials'][0][0][0][0]
eeg['ica_weights']=EEG['icaweights'][0][0]
eeg['ica_sphere']=EEG['icasphere'][0][0]
eeg['ica_winv']=EEG['icawinv'][0][0]
eeg['file_name']=file_name
eeg['path']=path
eeg['channel_locations']=EEG['chanlocs'][0][0]
eeg['prior_data_path']=EEG['data'][0][0][0]
return eeg
def find_artifact_indexes(eeg, data):
windowTimeLength = 200  # in ms
windowFrameLength = int(round(eeg['sample_rate'] * windowTimeLength / 1000))
coefs = ones((windowFrameLength,))
threshold = 2.1
args=[data[:,i] for i in np.arange(data.shape[1])]
result = pool.map(tied_rank, args)
tdrnk = array(result)/data.shape[0]
twosidep = np.minimum(tdrnk, 1-tdrnk)
log_likelihood = -np.log(twosidep)
meanLogLikelihood = np.mean(np.transpose(log_likelihood), 1)
windowFrame = np.arange((int(round(-windowFrameLength/2))),int(round((windowFrameLength/2)))).reshape((1,-1))
meanLogLikelihood = np.nan_to_num(meanLogLikelihood)
meanLogLikelihood[meanLogLikelihood > 1e20]=1e20
smoothMeanLogLikelihood = signal.filtfilt(coefs, array([1]), meanLogLikelihood)/(np.power(windowFrameLength,2))
isArtifactWindowCenter = np.where(smoothMeanLogLikelihood > threshold)[0].reshape((-1,1))
print 'clean indexes: number of artifact frames detected = %d' % len(isArtifactWindowCenter)
artifactFrames = np.tile(windowFrame, (isArtifactWindowCenter.shape[0], 1)) + np.tile(isArtifactWindowCenter, (1 , windowFrame.shape[0]))
artifactFrames = np.maximum(artifactFrames, 1)
artifactFrames = np.minimum(artifactFrames, meanLogLikelihood.shape[0])
artifactFrames = np.unique(artifactFrames[:])-1
return artifactFrames
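# Toy walk-through of the statistic above (illustrative values): ranks become
# an empirical CDF position, then a two-sided tail probability, then a
# log-likelihood, so the extreme sample ends up with the largest score.
# x = np.array([0.1, 0.2, 0.15, 5.0])
# rnk = array(tied_rank(x)) / x.shape[0]
# ll = np.nan_to_num(-np.log(np.minimum(rnk, 1 - rnk)))
# assert ll.argmax() == 3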
def tied_rank(x):
|
"""
compile_data: input_str is the path to your input files, with the character '?' inserted where you want to specify different values.
For instance: 'X:\RSVP\exp?\realtime\exp?_continuous_with_ica' with substitute = range(44,61) will process files
'X:\RSVP\exp44\realtime\exp44_continuous_with_ica.set' ... all the way to 60. The sequence files will be created in the outputpath and
automatically uploaded to hdfs_target_path in the HDFS file system if those are specified. Assumes you are running this on the head node.
"""
def compile_data(input_str, substitute, outputpath='', compression=False, test_file=False, p=None):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
if p is not None:
global pool
pool = p
ica_key, ica_val, raw_key, raw_val = Text(), Text(), Text(), Text()
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
eeg = get_eeg(path_to_data + os.sep, filename)
if eeg != 1:
raw_data, ica_act = read_full_float(eeg)
else:
continue
if raw_data is None:
continue
print(filename + ': identifying outliers')
artifact_indexes = find_artifact_indexes(eeg, ica_act)
eeg['artifact_indexes'] = artifact_indexes;
f = open('..' + os.sep + 'artifact_indexes', 'wb')  # binary mode for pickle
pickle.dump(artifact_indexes, f)
f.close()
eegstr = pickle.dumps(eeg, protocol=2)
print(filename + ': compiling dataset into hadoop sequence file')
if outputpath == '':
outputpath = path_to_data
#Enable compression if requested
if compression:
comp_type=SequenceFile.CompressionType.RECORD
else:
comp_type=SequenceFile.CompressionType.NONE
writer = SequenceFile.createWriter(outputpath + os.sep + filename + '.seq', Text, Text, compression_type=comp_type)
for i in range(raw_data.shape[1]):
if test_file and i > 3:
break
this_raw = np.ascontiguousarray(raw_data[:,i], dtype=raw_data.dtype)
this_ica = np.ascontiguousarray(ica_act[:,i], dtype=ica_act.dtype)
ica_key.set(outputpath + os.sep + filename + '.ica.' + str(i+1))
raw_key.set(outputpath + os.sep + filename + '.raw.' + str(i+1))
ica_temp = pickle.dumps((this_ica, eegstr), protocol=2)
raw_temp = pickle.dumps((this_raw, eegstr), protocol=2)
ica = base64.b64encode(ica_temp)
raw = base64.b64encode(raw_temp)
ica_val.set(ica)
raw_val.set(raw)
writer.append(raw_key, raw_val)
writer.append(ica_key, ica_val)
print(filename + ': '+str(i+1))
writer.close()
print filename + ': finished writing file'
return 0
def read_full_float(eeg):
print(eeg['file_name'] + ': reading full float file')
fn = eeg['path'] + eeg['file_name'] + '.fdt'
try:
f = fop.fopen(fn, 'r', 'l')
except:
print eeg['file_name']+': could not open ' + fn
return None, None
raw_data = f.read((eeg['num_samples'], eeg['num_channels']), 'float32')
f.close();
#Recompute ICA activations
print (eeg['file_name'] + ': recomputing ICA activations')
ica_act= np.transpose(np.float32(np.dot(np.dot(eeg['ica_weights'], eeg['ica_sphere']), np.transpose(raw_data))))
return raw_data, ica_act
def create_file_manifest(input_str, substitute, outputpath=''):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
f = open(outputpath + os.sep + 'manifest.txt', 'w')
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
# The original loop body recorded nothing; writing one data location per
# line is an assumed completion of the intended manifest format.
f.write(path_to_data + os.sep + filename + '\n')
f.close()
def hadoop2mat(directory):
result={}
for fl in os.listdir(directory):
if fl.split('-')[0]=='part':
current = os.path.join(directory, fl)
print current
if os.path.isfile(current):
f = open(current, 'rb')
result_str = f.read().strip('\n')
f.close()
if not result_str=='':
experiments = result_str.split('\n')
kvps = [exp.split('\t') for exp in experiments]
for kvp in kvps:
this_result = pickle.loads(base64.b64decode(kvp[1]))
path, name = kvp[0].rsplit('/', 1)
print name
result[name]=this_result
savemat(directory+os.sep+'result.mat', result)
def main():
parser = argparse.ArgumentParser(description='Recompile EEGLAB files into sequence files for Hadoop')
parser.add_argument('file_str', type=str)
parser.add_argument('range', type=int, nargs=2)
parser.add_argument('outputpath', type=str)
parser.add_argument('--hdfs_target_path', type=str,default='', dest='hdfs_path')
parser.add_argument('--compression', help='compression on',action='store_true')
parser.add_argument('--manifest', help='compile the output as a list of file locations',action='store_true')
parser.add_argument('--sequencefile', help='compile the output as a hadoop sequencefile for use with hdfs',action='store_true')
parser.add_argument('--testfile', help='compile a small sequencefile for testing (~10 channels)',action='store_true')
parser.add_argument('--hadoop2mat', help='collect hadoop output files into a single .mat file',action='store_true')
#b= ['X:\RSVP\exp?\\realtime\exp?_continuous_with_ica','54' ,'54', 'X:\RSVP\hadoop\\']
theseargs = parser.parse_args()
if theseargs.range[0] == theseargs.range[1]:
trange = [theseargs.range[0]]
else:
trange = range(theseargs.range[0] ,theseargs.range[1]+1)
global pool
pool = mp.Pool(mp.cpu_count()-1)
ts = datetime.now()
#Creates full sequencefiles
if theseargs.sequencefile:
print 'eeglab2hadoop: creating sequence file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression)
#Creates list of file locations to bypass hdfs
if theseargs.manifest:
print 'eeglab2hadoop: creating manifest'
create_file_manifest(theseargs.file_str, trange, theseargs.outputpath)
#Creates small sequencefile for testing purposes
if theseargs.testfile:
print 'eeglab2hadoop: creating test file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression, test_file=True)
#Collects the part files produced by hadoop into a single result.mat for Matlab
if theseargs.hadoop2mat:
print 'eeglab2hadoop: consolidating hadoop parts into result.mat'
hadoop2mat(theseargs.file_str)
c=datetime.now()-ts
print ' '
print 'eeglab2hadoop: Completed processing in ' + str(c.seconds) + ' seconds'
pool.close()
pool.join()
return 0
if __name__ == "__main__":
mp.freeze_support()
main() | """
from: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/auc.py
Computes the tied rank of elements in x.
Parameters
----------
x : list of numbers, numpy array
Returns
-------
score : list of numbers
The tied rank of each element in x
"""
sorted_x = sorted(zip(x,range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank+1+i)/2.0
last_rank = i
if i==len(sorted_x)-1:
for j in range(last_rank, i+1):
r[sorted_x[j][1]] = float(last_rank+i+2)/2.0
return r | identifier_body |
eeglab2hadoop.py | #!/oasis/scratch/csd181/mdburns/python/bin/python
# Copyright (C) 2012 Matthew Burns <mdburns@ucsd.edu>
from datetime import datetime
import helpers
import helpers.float_open as fop
from scipy import stats, zeros, ones, signal | import numpy as np
from numpy import array
import os
import sys
import pickle
import argparse
import base64
import gc
import multiprocessing as mp
from hadoop.io import SequenceFile, Text
NUMFOLDS = 5
NUM_SAMPLES = 0
SAMPLE_RATE = 0
NUM_EVENTS = 0
NUM_POINTS = 0
pool=None
def get_eeg(path, file_name):
print 'get_eeg: reading EEGLAB .set file '+ file_name
fullpath = path+file_name
try:
f = loadmat(fullpath+'.set', appendmat=False)
except:
print >> sys.stderr, 'get_eeg: could not load '+ file_name + '.set'
return 1
EEG = f['EEG']
events = {}
eeg = {}
label = []
latencyInFrame=[]
uniqueLabel=[]
event = EEG['event'][0][0][0]
gc.disable()
for t_event in event:
this_latency = str(t_event[0][0][0])
this_label = str(t_event[1][0][0])
latencyInFrame.append(this_latency)
label.append(this_label)
if this_label not in uniqueLabel:
uniqueLabel.append(this_label)
gc.enable()
uniqueLabel=[int(x) for x in uniqueLabel]
events['uniqueLabel'] = [str(x) for x in sorted(uniqueLabel)]
#-1 for Matlab indexing conversion
events['latencyInFrame'] = [(int(x)-1) for x in latencyInFrame]
events['label'] = label
eeg['events']=events
eeg['num_events']=len(events['label'])
eeg['sample_rate']=EEG['srate'][0][0][0][0]
eeg['num_samples']=EEG['pnts'][0][0][0][0]
eeg['num_channels']=EEG['nbchan'][0][0][0][0]
eeg['trials']=EEG['trials'][0][0][0][0]
eeg['ica_weights']=EEG['icaweights'][0][0]
eeg['ica_sphere']=EEG['icasphere'][0][0]
eeg['ica_winv']=EEG['icawinv'][0][0]
eeg['file_name']=file_name
eeg['path']=path
eeg['channel_locations']=EEG['chanlocs'][0][0]
eeg['prior_data_path']=EEG['data'][0][0][0]
return eeg
def find_artifact_indexes(eeg, data):
windowTimeLength = 200  # in ms
windowFrameLength = int(round(eeg['sample_rate'] * windowTimeLength / 1000))
coefs = ones((windowFrameLength,))
threshold = 2.1
args=[data[:,i] for i in np.arange(data.shape[1])]
result = pool.map(tied_rank, args)
tdrnk = array(result)/data.shape[0]
twosidep = np.minimum(tdrnk, 1-tdrnk)
log_likelihood = -np.log(twosidep)
meanLogLikelihood = np.mean(np.transpose(log_likelihood), 1)
windowFrame = np.arange((int(round(-windowFrameLength/2))),int(round((windowFrameLength/2)))).reshape((1,-1))
meanLogLikelihood = np.nan_to_num(meanLogLikelihood)
meanLogLikelihood[meanLogLikelihood > 1e20]=1e20
smoothMeanLogLikelihood = signal.filtfilt(coefs, array([1]), meanLogLikelihood)/(np.power(windowFrameLength,2))
isArtifactWindowCenter = np.where(smoothMeanLogLikelihood > threshold)[0].reshape((-1,1))
print 'clean indexes: number of artifact frames detected = %d' % len(isArtifactWindowCenter)
artifactFrames = np.tile(windowFrame, (isArtifactWindowCenter.shape[0], 1)) + np.tile(isArtifactWindowCenter, (1 , windowFrame.shape[0]))
artifactFrames = np.maximum(artifactFrames, 1)
artifactFrames = np.minimum(artifactFrames, meanLogLikelihood.shape[0])
artifactFrames = np.unique(artifactFrames[:])-1
return artifactFrames
def tied_rank(x):
"""
from: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/auc.py
Computes the tied rank of elements in x.
Parameters
----------
x : list of numbers, numpy array
Returns
-------
score : list of numbers
The tied rank of each element in x
"""
sorted_x = sorted(zip(x,range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank+1+i)/2.0
last_rank = i
if i==len(sorted_x)-1:
for j in range(last_rank, i+1):
r[sorted_x[j][1]] = float(last_rank+i+2)/2.0
return r
"""
compile_data: input_str is the path to your imput files, with the character '?' inserted where you want to specify different values.
For instance: 'X:\RSVP\exp?\realtime\exp?_continuous_with_ica' with substitute = range(44,61) will process files
'X:\RSVP\exp44\realtime\exp44_continuous_with_ica.set' ... all the way to 60. The sequence files will be created in the outputpath and
automatically uploaded to hdfs_target_path in the HDFS file system if those are specified. Assumes you are running this on the head node.
"""
def compile_data(input_str, substitute, outputpath='', compression=False, test_file=False, p=None):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
if p is not None:
global pool
pool = p
ica_key, ica_val, raw_key, raw_val = Text(), Text(), Text(), Text()
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
eeg = get_eeg(path_to_data + os.sep, filename)
if eeg != 1:
raw_data, ica_act = read_full_float(eeg)
else:
continue
if raw_data is None:
continue
print(filename + ': identifying outliers')
artifact_indexes = find_artifact_indexes(eeg, ica_act)
eeg['artifact_indexes'] = artifact_indexes;
f = open('..' + os.sep + 'artifact_indexes', 'wb')  # binary mode for pickle
pickle.dump(artifact_indexes, f)
f.close()
eegstr = pickle.dumps(eeg, protocol=2)
print(filename + ': compiling dataset into hadoop sequence file')
if outputpath == '':
outputpath = path_to_data
#Enable compression if requested
if compression:
comp_type=SequenceFile.CompressionType.RECORD
else:
comp_type=SequenceFile.CompressionType.NONE
writer = SequenceFile.createWriter(outputpath + os.sep + filename + '.seq', Text, Text, compression_type=comp_type)
for i in range(raw_data.shape[1]):
if test_file and i > 3:
break
this_raw = np.ascontiguousarray(raw_data[:,i], dtype=raw_data.dtype)
this_ica = np.ascontiguousarray(ica_act[:,i], dtype=ica_act.dtype)
ica_key.set(outputpath + os.sep + filename + '.ica.' + str(i+1))
raw_key.set(outputpath + os.sep + filename + '.raw.' + str(i+1))
ica_temp = pickle.dumps((this_ica, eegstr), protocol=2)
raw_temp = pickle.dumps((this_raw, eegstr), protocol=2)
ica = base64.b64encode(ica_temp)
raw = base64.b64encode(raw_temp)
ica_val.set(ica)
raw_val.set(raw)
writer.append(raw_key, raw_val)
writer.append(ica_key, ica_val)
print(filename + ': '+str(i+1))
writer.close()
print filename + ': finished writing file'
return 0
def read_full_float(eeg):
print(eeg['file_name'] + ': reading full float file')
fn = eeg['path'] + eeg['file_name'] + '.fdt'
try:
f = fop.fopen(fn, 'r', 'l')
except:
print eeg['file_name']+': could not open ' + fn
return None, None
raw_data = f.read((eeg['num_samples'], eeg['num_channels']), 'float32')
f.close();
#Recompute ICA activations
print (eeg['file_name'] + ': recomputing ICA activations')
ica_act= np.transpose(np.float32(np.dot(np.dot(eeg['ica_weights'], eeg['ica_sphere']), np.transpose(raw_data))))
return raw_data, ica_act
def create_file_manifest(input_str, substitute, outputpath=''):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
f = open(outputpath + os.sep + 'manifest.txt', 'w')
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
# The original loop body recorded nothing; writing one data location per
# line is an assumed completion of the intended manifest format.
f.write(path_to_data + os.sep + filename + '\n')
f.close()
def hadoop2mat(directory):
result={}
for fl in os.listdir(directory):
if fl.split('-')[0]=='part':
current = os.path.join(directory, fl)
print current
if os.path.isfile(current):
f = open(current, 'rb')
result_str = f.read().strip('\n')
f.close()
if not result_str=='':
experiments = result_str.split('\n')
kvps = [exp.split('\t') for exp in experiments]
for kvp in kvps:
this_result = pickle.loads(base64.b64decode(kvp[1]))
path, name = kvp[0].rsplit('/', 1)
print name
result[name]=this_result
savemat(directory+os.sep+'result.mat', result)
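# Round-trip sketch of the value encoding hadoop2mat expects (hypothetical
# key and payload, for illustration): each part-file line is
# "<key>\t<base64(pickle(payload))>".
# payload = {'score': 0.5}
# line = 'out/exp44.ica.1' + '\t' + base64.b64encode(pickle.dumps(payload, protocol=2))
# key, value = line.split('\t')
# assert pickle.loads(base64.b64decode(value)) == payload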
def main():
parser = argparse.ArgumentParser(description='Recompile EEGLAB files into sequence files for Hadoop')
parser.add_argument('file_str', type=str)
parser.add_argument('range', type=int, nargs=2)
parser.add_argument('outputpath', type=str)
parser.add_argument('--hdfs_target_path', type=str,default='', dest='hdfs_path')
parser.add_argument('--compression', help='compression on',action='store_true')
parser.add_argument('--manifest', help='compile the output as a list of file locations',action='store_true')
parser.add_argument('--sequencefile', help='compile the output as a hadoop sequencefile for use with hdfs',action='store_true')
parser.add_argument('--testfile', help='compile a small sequencefile for testing (~10 channels)',action='store_true')
parser.add_argument('--hadoop2mat', help='collect hadoop output files into a single .mat file',action='store_true')
#b= ['X:\RSVP\exp?\\realtime\exp?_continuous_with_ica','54' ,'54', 'X:\RSVP\hadoop\\']
theseargs = parser.parse_args()
if theseargs.range[0] == theseargs.range[1]:
trange = [theseargs.range[0]]
else:
trange = range(theseargs.range[0] ,theseargs.range[1]+1)
global pool
pool = mp.Pool(mp.cpu_count()-1)
ts = datetime.now()
#Creates full sequencefiles
if theseargs.sequencefile:
print 'eeglab2hadoop: creating sequence file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression)
#Creates list of file locations to bypass hdfs
if theseargs.manifest:
print 'eeglab2hadoop: creating manifest'
create_file_manifest(theseargs.file_str, trange, theseargs.outputpath)
#Creates small sequencefile for testing purposes
if theseargs.testfile:
print 'eeglab2hadoop: creating test file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression, test_file=True)
#Collects the part files produced by hadoop into a single result.mat for Matlab
if theseargs.hadoop2mat:
print 'eeglab2hadoop: consolidating hadoop parts into result.mat'
hadoop2mat(theseargs.file_str)
c=datetime.now()-ts
print ' '
print 'eeglab2hadoop: Completed processing in ' + str(c.seconds) + ' seconds'
pool.close()
pool.join()
return 0
if __name__ == "__main__":
mp.freeze_support()
main() | from scipy.io import loadmat, savemat | random_line_split |
eeglab2hadoop.py | #!/oasis/scratch/csd181/mdburns/python/bin/python
# Copyright (C) 2012 Matthew Burns <mdburns@ucsd.edu>
from datetime import datetime
import helpers
import helpers.float_open as fop
from scipy import stats, zeros, ones, signal
from scipy.io import loadmat, savemat
import numpy as np
from numpy import array
import os
import sys
import pickle
import argparse
import base64
import gc
import multiprocessing as mp
from hadoop.io import SequenceFile, Text
NUMFOLDS = 5
NUM_SAMPLES = 0
SAMPLE_RATE = 0
NUM_EVENTS = 0
NUM_POINTS = 0
pool=None
def get_eeg(path, file_name):
print 'get_eeg: reading EEGLAB .set file '+ file_name
fullpath = path+file_name
try:
f = loadmat(fullpath+'.set', appendmat=False)
except:
print >> sys.stderr, 'get_eeg: could not load '+ file_name + '.set'
return 1
EEG = f['EEG']
events = {}
eeg = {}
label = []
latencyInFrame=[]
uniqueLabel=[]
event = EEG['event'][0][0][0]
gc.disable()
for t_event in event:
this_latency = str(t_event[0][0][0])
this_label = str(t_event[1][0][0])
latencyInFrame.append(this_latency)
label.append(this_label)
if this_label not in uniqueLabel:
uniqueLabel.append(this_label)
gc.enable()
uniqueLabel=[int(x) for x in uniqueLabel]
events['uniqueLabel'] = [str(x) for x in sorted(uniqueLabel)]
#-1 for Matlab indexing conversion
events['latencyInFrame'] = [(int(x)-1) for x in latencyInFrame]
events['label'] = label
eeg['events']=events
eeg['num_events']=len(events['label'])
eeg['sample_rate']=EEG['srate'][0][0][0][0]
eeg['num_samples']=EEG['pnts'][0][0][0][0]
eeg['num_channels']=EEG['nbchan'][0][0][0][0]
eeg['trials']=EEG['trials'][0][0][0][0]
eeg['ica_weights']=EEG['icaweights'][0][0]
eeg['ica_sphere']=EEG['icasphere'][0][0]
eeg['ica_winv']=EEG['icawinv'][0][0]
eeg['file_name']=file_name
eeg['path']=path
eeg['channel_locations']=EEG['chanlocs'][0][0]
eeg['prior_data_path']=EEG['data'][0][0][0]
return eeg
def find_artifact_indexes(eeg, data):
windowTimeLength = 200  # in ms
windowFrameLength = int(round(eeg['sample_rate'] * windowTimeLength / 1000))
coefs = ones((windowFrameLength,))
threshold = 2.1
args=[data[:,i] for i in np.arange(data.shape[1])]
result = pool.map(tied_rank, args)
tdrnk = array(result)/data.shape[0]
twosidep = np.minimum(tdrnk, 1-tdrnk)
log_likelihood = -np.log(twosidep)
meanLogLikelihood = np.mean(np.transpose(log_likelihood), 1)
windowFrame = np.arange((int(round(-windowFrameLength/2))),int(round((windowFrameLength/2)))).reshape((1,-1))
meanLogLikelihood = np.nan_to_num(meanLogLikelihood)
meanLogLikelihood[meanLogLikelihood > 1e20]=1e20
smoothMeanLogLikelihood = signal.filtfilt(coefs, array([1]), meanLogLikelihood)/(np.power(windowFrameLength,2))
isArtifactWindowCenter = np.where(smoothMeanLogLikelihood > threshold)[0].reshape((-1,1))
print 'clean indexes: number of artifact frames detected = %d' % len(isArtifactWindowCenter)
artifactFrames = np.tile(windowFrame, (isArtifactWindowCenter.shape[0], 1)) + np.tile(isArtifactWindowCenter, (1 , windowFrame.shape[0]))
artifactFrames = np.maximum(artifactFrames, 1)
artifactFrames = np.minimum(artifactFrames, meanLogLikelihood.shape[0])
artifactFrames = np.unique(artifactFrames[:])-1
return artifactFrames
def tied_rank(x):
"""
from: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/auc.py
Computes the tied rank of elements in x.
Parameters
----------
x : list of numbers, numpy array
Returns
-------
score : list of numbers
The tied rank of each element in x
"""
sorted_x = sorted(zip(x,range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank+1+i)/2.0
last_rank = i
if i==len(sorted_x)-1:
for j in range(last_rank, i+1):
r[sorted_x[j][1]] = float(last_rank+i+2)/2.0
return r
"""
compile_data: input_str is the path to your input files, with the character '?' inserted where you want to specify different values.
For instance: 'X:\RSVP\exp?\realtime\exp?_continuous_with_ica' with substitute = range(44,61) will process files
'X:\RSVP\exp44\realtime\exp44_continuous_with_ica.set' ... all the way to 60. The sequence files will be created in the outputpath and
automatically uploaded to hdfs_target_path in the HDFS file system if those are specified. Assumes you are running this on the head node.
"""
def compile_data(input_str, substitute, outputpath='', compression=False, test_file=False, p=None):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
if p is not None:
global pool
pool = p
ica_key, ica_val, raw_key, raw_val = Text(), Text(), Text(), Text()
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
eeg = get_eeg(path_to_data + os.sep, filename)
if eeg != 1:
raw_data, ica_act = read_full_float(eeg)
else:
continue
if raw_data is None:
continue
print(filename + ': identifying outliers')
artifact_indexes = find_artifact_indexes(eeg, ica_act)
eeg['artifact_indexes'] = artifact_indexes;
f = open('..' + os.sep + 'artifact_indexes', 'wb')  # binary mode for pickle
pickle.dump(artifact_indexes, f)
f.close()
eegstr = pickle.dumps(eeg, protocol=2)
print(filename + ': compiling dataset into hadoop sequence file')
if outputpath == '':
outputpath = path_to_data
#Enable compression if requested
if compression:
comp_type=SequenceFile.CompressionType.RECORD
else:
comp_type=SequenceFile.CompressionType.NONE
writer = SequenceFile.createWriter(outputpath + os.sep + filename + '.seq', Text, Text, compression_type=comp_type)
for i in range(raw_data.shape[1]):
if test_file and i > 3:
break
this_raw = np.ascontiguousarray(raw_data[:,i], dtype=raw_data.dtype)
this_ica = np.ascontiguousarray(ica_act[:,i], dtype=ica_act.dtype)
ica_key.set(outputpath + os.sep + filename + '.ica.' + str(i+1))
raw_key.set(outputpath + os.sep + filename + '.raw.' + str(i+1))
ica_temp = pickle.dumps((this_ica, eegstr), protocol=2)
raw_temp = pickle.dumps((this_raw, eegstr), protocol=2)
ica = base64.b64encode(ica_temp)
raw = base64.b64encode(raw_temp)
ica_val.set(ica)
raw_val.set(raw)
writer.append(raw_key, raw_val)
writer.append(ica_key, ica_val)
print(filename + ': '+str(i+1))
writer.close()
print filename + ': finished writing file'
return 0
def read_full_float(eeg):
print(eeg['file_name'] + ': reading full float file')
fn = eeg['path'] + eeg['file_name'] + '.fdt'
try:
f = fop.fopen(fn, 'r', 'l')
except:
print eeg['file_name']+': could not open ' + fn
return None, None
raw_data = f.read((eeg['num_samples'], eeg['num_channels']), 'float32')
f.close();
#Recompute ICA activations
print (eeg['file_name'] + ': recomputing ICA activations')
ica_act= np.transpose(np.float32(np.dot(np.dot(eeg['ica_weights'], eeg['ica_sphere']), np.transpose(raw_data))))
return raw_data, ica_act
def | (input_str, substitute, outputpath=''):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath != '':
try:
os.mkdir(outputpath)
except: pass
f = open(outputpath + os.sep + 'manifest.txt', 'w')
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
# The original loop body recorded nothing; writing one data location per
# line is an assumed completion of the intended manifest format.
f.write(path_to_data + os.sep + filename + '\n')
f.close()
def hadoop2mat(directory):
result={}
for fl in os.listdir(directory):
if fl.split('-')[0]=='part':
current = os.path.join(directory, fl)
print current
if os.path.isfile(current):
f = open(current, 'rb')
result_str = f.read().strip('\n')
f.close()
if not result_str=='':
experiments = result_str.split('\n')
kvps = [exp.split('\t') for exp in experiments]
for kvp in kvps:
this_result = pickle.loads(base64.b64decode(kvp[1]))
path, name = kvp[0].rsplit('/', 1)
print name
result[name]=this_result
savemat(directory+os.sep+'result.mat', result)
def main():
parser = argparse.ArgumentParser(description='Recompile EEGLAB files into sequence files for Hadoop')
parser.add_argument('file_str', type=str)
parser.add_argument('range', type=int, nargs=2)
parser.add_argument('outputpath', type=str)
parser.add_argument('--hdfs_target_path', type=str,default='', dest='hdfs_path')
parser.add_argument('--compression', help='compression on',action='store_true')
parser.add_argument('--manifest', help='compile the output as a list of file locations',action='store_true')
parser.add_argument('--sequencefile', help='compile the output as a hadoop sequencefile for use with hdfs',action='store_true')
parser.add_argument('--testfile', help='compile a small sequencefile for testing (~10 channels)',action='store_true')
parser.add_argument('--hadoop2mat', help='collect hadoop output files into a single .mat file',action='store_true')
#b= ['X:\RSVP\exp?\\realtime\exp?_continuous_with_ica','54' ,'54', 'X:\RSVP\hadoop\\']
theseargs = parser.parse_args()
if theseargs.range[0] == theseargs.range[1]:
trange = [theseargs.range[0]]
else:
trange = range(theseargs.range[0] ,theseargs.range[1]+1)
global pool
pool = mp.Pool(mp.cpu_count()-1)
ts = datetime.now()
#Creates full sequencefiles
if theseargs.sequencefile:
print 'eeglab2hadoop: creating sequence file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression)
#Creates list of file locations to bypass hdfs
if theseargs.manifest:
print 'eeglab2hadoop: creating manifest'
create_file_manifest(theseargs.file_str, trange, theseargs.outputpath)
#Creates small sequencefile for testing purposes
if theseargs.testfile:
print 'eeglab2hadoop: creating test file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression, test_file=True)
#Puts the files created by hadoop into a JSON string for Matlab
if theseargs.hadoop2mat:
print 'eeglab2hadoop: consolidating hadoop parts into result.mat'
hadoop2mat(theseargs.file_str)
c=datetime.now()-ts
print ' '
print 'eeglab2hadoop: Completed processing in ' + str(c.seconds) + ' seconds'
pool.close()
pool.join()
return 0
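# Example command lines matching the argparse setup above (the paths are
# hypothetical placeholders):
#   python eeglab2hadoop.py 'X:\RSVP\exp?\realtime\exp?_continuous_with_ica' 44 60 'X:\RSVP\hadoop' --sequencefile --compression
#   python eeglab2hadoop.py /data/hadoop/output 0 0 /data/hadoop/output --hadoop2mat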
if __name__ == "__main__":
mp.freeze_support()
main() | create_file_manifest | identifier_name |
instream.rs | extern crate libsoundio_sys as raw;
use super::error::*;
use super::format::*;
use super::sample::*;
use super::util::*;
use std::marker::PhantomData;
use std::os::raw::{c_double, c_int};
use std::ptr;
use std::slice;
/// This is called when an instream has data available to read. The `InStreamUserData` struct is obtained
/// from the stream.userdata, then the user-supplied callback is called with an `InStreamReader`
/// object.
pub extern "C" fn instream_read_callback(
stream: *mut raw::SoundIoInStream,
frame_count_min: c_int,
frame_count_max: c_int,
) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
let mut stream_reader = InStreamReader {
instream: userdata.instream,
frame_count_min: frame_count_min as _,
frame_count_max: frame_count_max as _,
read_started: false,
channel_areas: Vec::new(),
frame_count: 0,
phantom: PhantomData,
};
(userdata.read_callback)(&mut stream_reader);
}
pub extern "C" fn instream_overflow_callback(stream: *mut raw::SoundIoInStream) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
if let Some(ref mut cb) = userdata.overflow_callback {
cb();
} else {
println!("Overflow!");
}
}
pub extern "C" fn instream_error_callback(stream: *mut raw::SoundIoInStream, err: c_int) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
if let Some(ref mut cb) = userdata.error_callback {
cb(err.into());
} else {
println!("Error: {}", Error::from(err));
}
}
/// InStream represents an input stream for recording.
///
/// It is obtained from `Device` using `Device::open_instream()` and
/// can be started and paused.
pub struct InStream<'a> {
pub userdata: Box<InStreamUserData<'a>>,
// This is just here to say that InStream cannot outlive the Device it was created from.
pub phantom: PhantomData<&'a ()>,
}
/// The callbacks required for an instream are stored in this object. We also store a pointer
/// to the raw instream so that it can be passed to `InStreamReader` in the read callback.
pub struct InStreamUserData<'a> {
pub instream: *mut raw::SoundIoInStream,
pub read_callback: Box<dyn FnMut(&mut InStreamReader) + 'a>,
pub overflow_callback: Option<Box<dyn FnMut() + 'a>>,
pub error_callback: Option<Box<dyn FnMut(Error) + 'a>>,
}
impl<'a> Drop for InStreamUserData<'a> {
fn drop(&mut self) {
unsafe {
raw::soundio_instream_destroy(self.instream);
}
}
}
impl<'a> InStream<'a> {
/// Starts the stream, returning `Ok(())` if it started successfully. Once
/// started the read callback will be periodically called according to the
/// requested latency.
///
/// `start()` should only ever be called once on an `InStream`.
/// Do not use `start()` to resume a stream after pausing it. Instead call `pause(false)`.
///
/// # Errors
///
/// * `Error::BackendDisconnected`
/// * `Error::Streaming`
/// * `Error::OpeningDevice`
/// * `Error::SystemResources`
///
pub fn start(&mut self) -> Result<()> {
match unsafe { raw::soundio_instream_start(self.userdata.instream) } {
0 => Ok(()),
x => Err(x.into()), | }
// TODO: Can pause() be called from the read callback?
/// If the underlying backend and device support pausing, this pauses the
/// stream. The `read_callback()` may be called a few more times if
/// the buffer is not full.
///
/// Pausing might put the hardware into a low power state which is ideal if your
/// software is silent for some time.
///
/// This should not be called before `start()`. Pausing when already paused or
/// unpausing when already unpaused has no effect and returns `Ok(())`.
///
/// # Errors
///
/// * `Error::BackendDisconnected`
/// * `Error::Streaming`
/// * `Error::IncompatibleDevice` - device does not support pausing/unpausing
///
pub fn pause(&mut self, pause: bool) -> Result<()> {
match unsafe { raw::soundio_instream_pause(self.userdata.instream, pause as i8) } {
0 => Ok(()),
e => Err(e.into()),
}
}
/// Returns the stream format.
pub fn format(&self) -> Format {
unsafe { (*self.userdata.instream).format.into() }
}
/// Sample rate is the number of frames per second.
pub fn sample_rate(&self) -> i32 {
unsafe { (*self.userdata.instream).sample_rate as _ }
}
/// Ignoring hardware latency, this is the number of seconds it takes for a
/// captured sample to become available for reading.
/// After you call `Device::open_instream()`, this value is replaced with the
/// actual software latency, as near to this value as possible.
///
/// A higher value means less CPU usage. Defaults to a large value,
/// potentially upwards of 2 seconds.
///
/// If the device has unknown software latency min and max values, you may
/// still set this (in `Device::open_instream()`), but you might not
/// get the value you requested.
///
/// For PulseAudio, if you set this value to non-default, it sets
/// `PA_STREAM_ADJUST_LATENCY` and is the value used for `fragsize`.
/// For JACK, this value is always equal to
/// `Device::software_latency().current`.
pub fn software_latency(&self) -> f64 {
unsafe { (*self.userdata.instream).software_latency as _ }
}
/// The name of the stream, which defaults to "SoundIoInStream".
///
/// PulseAudio uses this for the stream name.
/// JACK uses this for the client name of the client that connects when you
/// open the stream.
/// WASAPI uses this for the session display name.
/// Must not contain a colon (":").
///
/// TODO: Currently there is no way to set this.
pub fn name(&self) -> String {
unsafe { utf8_to_string((*self.userdata.instream).name) }
}
/// The number of bytes per frame, equal to the number of bytes
/// per sample, multiplied by the number of channels.
pub fn bytes_per_frame(&self) -> i32 {
unsafe { (*self.userdata.instream).bytes_per_frame as _ }
}
/// The number of bytes in a sample, e.g. 3 for `i24`.
pub fn bytes_per_sample(&self) -> i32 {
unsafe { (*self.userdata.instream).bytes_per_sample as _ }
}
}
/// `InStreamReader` is passed to the read callback and can be used to read from the stream.
///
/// You start by calling `begin_read()` and then you can read the samples. When the `InStreamReader`
/// is dropped the samples are dropped. An error at that point is written to the console and ignored.
///
pub struct InStreamReader<'a> {
instream: *mut raw::SoundIoInStream,
frame_count_min: usize,
frame_count_max: usize,
read_started: bool,
// The memory area to write to - one for each channel. Populated after begin_read()
channel_areas: Vec<raw::SoundIoChannelArea>,
// The actual frame count. Populated after begin_read()
frame_count: usize,
// This cannot outlive the scope that it is spawned from (in the read callback).
phantom: PhantomData<&'a ()>,
}
impl<'a> InStreamReader<'a> {
/// Start a read. You can only call this once per callback, otherwise it panics.
///
/// `frame_count` is the number of frames you want to read. It must be between
/// `frame_count_min` and `frame_count_max` inclusive, or `begin_read()` will panic.
///
/// It returns the number of frames you can actually read. The returned value
/// will always be less than or equal to the provided value.
///
/// # Errors
///
/// * `Error::Invalid`
/// * `frame_count` < `frame_count_min` or `frame_count` > `frame_count_max`
/// * `Error::Streaming`
/// * `Error::IncompatibleDevice` - in rare cases it might just now
/// be discovered that the device uses non-byte-aligned access, in which
/// case this error code is returned.
///
pub fn begin_read(&mut self, frame_count: usize) -> Result<usize> {
assert!(
frame_count >= self.frame_count_min && frame_count <= self.frame_count_max,
"frame_count out of range"
);
let mut areas: *mut raw::SoundIoChannelArea = ptr::null_mut();
let mut actual_frame_count: c_int = frame_count as _;
match unsafe {
raw::soundio_instream_begin_read(
self.instream,
&mut areas as *mut _,
&mut actual_frame_count as *mut _,
)
} {
0 => {
self.read_started = true;
self.frame_count = actual_frame_count as _;
// Return now if there's no frames to actually read.
if actual_frame_count <= 0 {
return Ok(0);
}
let cc = self.channel_count();
self.channel_areas = vec![
raw::SoundIoChannelArea {
ptr: ptr::null_mut(),
step: 0
};
cc
];
unsafe {
self.channel_areas.copy_from_slice(slice::from_raw_parts::<
raw::SoundIoChannelArea,
>(areas, cc));
}
Ok(actual_frame_count as _)
}
e => Err(e.into()),
}
}
/// Commits the read that you began with `begin_read()`.
///
/// Errors are currently just printed to the console and ignored.
///
/// # Errors
///
/// * `Error::Streaming`
/// * `Error::Underflow` - a buffer over/underrun caused this call to fail. You might
/// also get an `overflow_callback()`, and you might not get
/// this error code when an overflow occurs. Unlike `Error::Streaming`,
/// the instream is still in a valid state and streaming can continue.
pub fn end_read(&mut self) {
if self.read_started {
unsafe {
match raw::soundio_instream_end_read(self.instream) {
0 => {
self.read_started = false;
}
x => println!("Error ending instream: {}", Error::from(x)),
}
}
}
}
/// Get the minimum frame count that you can call `begin_read()` with.
/// Retrieve this value before calling `begin_read()` to ensure you read the correct number
/// of frames.
pub fn frame_count_min(&self) -> usize {
self.frame_count_min
}
/// Get the maximum frame count that you can call `begin_read()` with.
/// Retrieve this value before calling `begin_read()` to ensure you read the correct number
/// of frames.
pub fn frame_count_max(&self) -> usize {
self.frame_count_max
}
/// Get the actual frame count that `begin_read()` returned. Panics if you haven't called
/// `begin_read()` yet.
pub fn frame_count(&self) -> usize {
assert!(self.read_started);
self.frame_count
}
/// Get latency in seconds due to software only, not including hardware.
pub fn software_latency(&self) -> f64 {
unsafe { (*self.instream).software_latency as _ }
}
/// Return the number of channels in this stream. Guaranteed to be at least 1.
pub fn channel_count(&self) -> usize {
unsafe { (*self.instream).layout.channel_count as _ }
}
/// Get the sample rate in Hertz.
pub fn sample_rate(&self) -> i32 {
unsafe { (*self.instream).sample_rate as _ }
}
/// Obtain the number of seconds that the next frame of sound being
/// captured will take to arrive in the buffer, plus the amount of time that is
/// represented in the buffer. This includes both software and hardware latency.
///
/// # Errors
///
/// * `Error::Streaming`
///
pub fn get_latency(&mut self) -> Result<f64> {
let mut x: c_double = 0.0;
match unsafe { raw::soundio_instream_get_latency(self.instream, &mut x as *mut c_double) } {
0 => Ok(x),
e => Err(e.into()),
}
}
/// Get the value of a sample. This panics if the `channel` or `frame` are
/// out of range or if you haven't called `begin_read()` yet.
///
/// If you request a different type from the actual one it will be converted.
///
/// # Examples
///
/// ```
/// fn read_callback(stream: &mut soundio::InStreamReader) {
/// let frame_count_max = stream.frame_count_max();
/// stream.begin_read(frame_count_max).unwrap();
/// for c in 0..stream.channel_count() {
/// for f in 0..stream.frame_count() {
/// do_something_with(stream.sample::<i16>(c, f));
/// }
/// }
/// }
/// # fn do_something_with(_: i16) { }
/// ```
pub fn sample<T: Sample>(&self, channel: usize, frame: usize) -> T {
assert!(self.read_started);
assert!(channel < self.channel_count(), "Channel out of range");
assert!(frame < self.frame_count(), "Frame out of range");
unsafe {
let ptr = self.channel_areas[channel]
.ptr
.add(frame * self.channel_areas[channel].step as usize)
as *mut u8;
match (*self.instream).format {
raw::SoundIoFormat::SoundIoFormatS8 => T::from_i8(i8::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU8 => T::from_u8(u8::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS16LE => T::from_i16(i16::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS16BE => T::from_i16(i16::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU16LE => T::from_u16(u16::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU16BE => T::from_u16(u16::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatS24LE => T::from_i24(i24::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS24BE => T::from_i24(i24::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU24LE => T::from_u24(u24::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU24BE => T::from_u24(u24::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatS32LE => T::from_i32(i32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS32BE => T::from_i32(i32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU32LE => T::from_u32(u32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU32BE => T::from_u32(u32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat32LE => T::from_f32(f32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat32BE => T::from_f32(f32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat64LE => T::from_f64(f64::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat64BE => T::from_f64(f64::from_raw_be(ptr)),
_ => panic!("Unknown format"),
}
}
}
// TODO: To achieve speed *and* safety I can use iterators. That will be in a future API.
}
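// Editorial sketch, not part of the original crate: a complete read callback
// built from the API above. It assumes `f32` implements `Sample` (the format
// conversion table in `sample()` suggests it does) and simply tracks the peak
// amplitude of the captured buffer.
fn example_read_callback(stream: &mut InStreamReader) {
    // Read as many frames as the backend allows in this callback.
    let frames = stream.begin_read(stream.frame_count_max()).unwrap();
    let mut peak = 0.0f32;
    for f in 0..frames {
        for c in 0..stream.channel_count() {
            // sample() converts from the device format on the fly.
            peak = peak.max(stream.sample::<f32>(c, f).abs());
        }
    }
    println!("peak amplitude this callback: {}", peak);
    // Ending explicitly is optional; dropping the reader also ends the read.
    stream.end_read();
}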
impl<'a> Drop for InStreamReader<'a> {
/// This will drop all of the frames from when you called `begin_read()`.
///
/// Errors are currently just printed to the console and ignored.
///
/// # Errors
///
/// * `Error::Streaming`
fn drop(&mut self) {
if self.read_started {
unsafe {
match raw::soundio_instream_end_read(self.instream) {
0 => {}
x => println!("Error ending instream: {}", Error::from(x)),
}
}
}
}
} | } | random_line_split |
instream.rs | extern crate libsoundio_sys as raw;
use super::error::*;
use super::format::*;
use super::sample::*;
use super::util::*;
use std::marker::PhantomData;
use std::os::raw::{c_double, c_int};
use std::ptr;
use std::slice;
/// This is called when an instream has data available to be read. The `InStreamUserData` struct is obtained
/// from the stream.userdata, then the user-supplied callback is called with an `InStreamReader`
/// object.
pub extern "C" fn instream_read_callback(
stream: *mut raw::SoundIoInStream,
frame_count_min: c_int,
frame_count_max: c_int,
) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
let mut stream_reader = InStreamReader {
instream: userdata.instream,
frame_count_min: frame_count_min as _,
frame_count_max: frame_count_max as _,
read_started: false,
channel_areas: Vec::new(),
frame_count: 0,
phantom: PhantomData,
};
(userdata.read_callback)(&mut stream_reader);
}
pub extern "C" fn instream_overflow_callback(stream: *mut raw::SoundIoInStream) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
if let Some(ref mut cb) = userdata.overflow_callback {
cb();
} else {
println!("Overflow!");
}
}
pub extern "C" fn instream_error_callback(stream: *mut raw::SoundIoInStream, err: c_int) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
if let Some(ref mut cb) = userdata.error_callback {
cb(err.into());
} else {
println!("Error: {}", Error::from(err));
}
}
/// InStream represents an input stream for recording.
///
/// It is obtained from `Device` using `Device::open_instream()` and
/// can be started and paused.
pub struct InStream<'a> {
pub userdata: Box<InStreamUserData<'a>>,
// This is just here to say that InStream cannot outlive the Device it was created from.
pub phantom: PhantomData<&'a ()>,
}
/// The callbacks required for an instream are stored in this object. We also store a pointer
/// to the raw instream so that it can be passed to `InStreamReader` in the read callback.
pub struct InStreamUserData<'a> {
pub instream: *mut raw::SoundIoInStream,
pub read_callback: Box<dyn FnMut(&mut InStreamReader) + 'a>,
pub overflow_callback: Option<Box<dyn FnMut() + 'a>>,
pub error_callback: Option<Box<dyn FnMut(Error) + 'a>>,
}
impl<'a> Drop for InStreamUserData<'a> {
fn drop(&mut self) {
unsafe {
raw::soundio_instream_destroy(self.instream);
}
}
}
impl<'a> InStream<'a> {
/// Starts the stream, returning `Ok(())` if it started successfully. Once
/// started the read callback will be periodically called according to the
/// requested latency.
///
/// `start()` should only ever be called once on an `InStream`.
/// Do not use `start()` to resume a stream after pausing it. Instead call `pause(false)`.
///
/// # Errors
///
/// * `Error::BackendDisconnected`
/// * `Error::Streaming`
/// * `Error::OpeningDevice`
/// * `Error::SystemResources`
///
pub fn start(&mut self) -> Result<()> {
match unsafe { raw::soundio_instream_start(self.userdata.instream) } {
0 => Ok(()),
x => Err(x.into()),
}
}
// TODO: Can pause() be called from the read callback?
/// If the underlying backend and device support pausing, this pauses the
/// stream. The `read_callback()` may be called a few more times if
/// the buffer is not empty.
///
/// Pausing might put the hardware into a low power state which is ideal if your
/// software is silent for some time.
///
/// This should not be called before `start()`. Pausing when already paused or
/// unpausing when already unpaused has no effect and returns `Ok(())`.
///
/// # Errors
///
/// * `Error::BackendDisconnected`
/// * `Error::Streaming`
/// * `Error::IncompatibleDevice` - device does not support pausing/unpausing
///
pub fn pause(&mut self, pause: bool) -> Result<()> {
match unsafe { raw::soundio_instream_pause(self.userdata.instream, pause as i8) } {
0 => Ok(()),
e => Err(e.into()),
}
}
/// Returns the stream format.
pub fn format(&self) -> Format {
unsafe { (*self.userdata.instream).format.into() }
}
/// Sample rate is the number of frames per second.
pub fn sample_rate(&self) -> i32 {
unsafe { (*self.userdata.instream).sample_rate as _ }
}
/// Ignoring hardware latency, this is the number of seconds it takes for a
/// captured sample to become available for reading.
/// After you call `Device::open_instream()`, this value is replaced with the
/// actual software latency, as near to this value as possible.
///
/// A higher value means less CPU usage. Defaults to a large value,
/// potentially upwards of 2 seconds.
///
/// If the device has unknown software latency min and max values, you may
/// still set this (in `Device::open_instream()`), but you might not
/// get the value you requested.
///
/// For PulseAudio, if you set this value to non-default, it sets
/// `PA_STREAM_ADJUST_LATENCY` and is the value used for `fragsize`.
/// For JACK, this value is always equal to
/// `Device::software_latency().current`.
pub fn software_latency(&self) -> f64 {
unsafe { (*self.userdata.instream).software_latency as _ }
}
/// The name of the stream, which defaults to "SoundIoInStream".
///
/// PulseAudio uses this for the stream name.
/// JACK uses this for the client name of the client that connects when you
/// open the stream.
/// WASAPI uses this for the session display name.
/// Must not contain a colon (":").
///
/// TODO: Currently there is no way to set this.
pub fn name(&self) -> String {
unsafe { utf8_to_string((*self.userdata.instream).name) }
}
/// The number of bytes per frame, equal to the number of bytes
/// per sample, multiplied by the number of channels.
pub fn bytes_per_frame(&self) -> i32 {
unsafe { (*self.userdata.instream).bytes_per_frame as _ }
}
/// The number of bytes in a sample, e.g. 3 for `i24`.
pub fn bytes_per_sample(&self) -> i32 {
unsafe { (*self.userdata.instream).bytes_per_sample as _ }
}
}
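// Editorial sketch, not part of the original crate: a typical start/pause
// cycle on an `InStream` that was already opened via `Device::open_instream()`.
// Error handling is deliberately minimal.
fn example_record_cycle(stream: &mut InStream) -> Result<()> {
    // start() must only ever be called once per stream.
    stream.start()?;
    // ... the read callback now fires according to the requested latency ...
    stream.pause(true)?; // may let the hardware enter a low power state
    stream.pause(false)?; // resume; do NOT call start() again
    Ok(())
}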
/// `InStreamReader` is passed to the read callback and can be used to read from the stream.
///
/// You start by calling `begin_read()` and then you can read the samples. When the `InStreamReader`
/// is dropped the samples are dropped. An error at that point is written to the console and ignored.
///
pub struct InStreamReader<'a> {
instream: *mut raw::SoundIoInStream,
frame_count_min: usize,
frame_count_max: usize,
read_started: bool,
// The memory areas to read from - one for each channel. Populated after begin_read()
channel_areas: Vec<raw::SoundIoChannelArea>,
// The actual frame count. Populated after begin_read()
frame_count: usize,
// This cannot outlive the scope that it is spawned from (in the read callback).
phantom: PhantomData<&'a ()>,
}
impl<'a> InStreamReader<'a> {
/// Start a read. You can only call this once per callback otherwise it panics.
///
/// frame_count is the number of frames you want to read. It must be between
/// frame_count_min and frame_count_max inclusive, or `begin_read()` will panic.
///
/// It returns the number of frames you can actually read. The returned value
/// will always be less than or equal to the provided value.
///
/// # Errors
///
/// * `Error::Invalid`
/// * `frame_count` < `frame_count_min` or `frame_count` > `frame_count_max`
/// * `Error::Streaming`
/// * `Error::IncompatibleDevice` - in rare cases it might just now
/// be discovered that the device uses non-byte-aligned access, in which
/// case this error code is returned.
///
pub fn begin_read(&mut self, frame_count: usize) -> Result<usize> {
assert!(
frame_count >= self.frame_count_min && frame_count <= self.frame_count_max,
"frame_count out of range"
);
let mut areas: *mut raw::SoundIoChannelArea = ptr::null_mut();
let mut actual_frame_count: c_int = frame_count as _;
match unsafe {
raw::soundio_instream_begin_read(
self.instream,
&mut areas as *mut _,
&mut actual_frame_count as *mut _,
)
} {
0 => {
self.read_started = true;
self.frame_count = actual_frame_count as _;
// Return now if there are no frames to actually read.
if actual_frame_count <= 0 {
return Ok(0);
}
let cc = self.channel_count();
self.channel_areas = vec![
raw::SoundIoChannelArea {
ptr: ptr::null_mut(),
step: 0
};
cc
];
unsafe {
self.channel_areas.copy_from_slice(slice::from_raw_parts::<
raw::SoundIoChannelArea,
>(areas, cc));
}
Ok(actual_frame_count as _)
}
e => Err(e.into()),
}
}
/// Commits the read that you began with `begin_read()`.
///
/// Errors are currently just printed to the console and ignored.
///
/// # Errors
///
/// * `Error::Streaming`
/// * `Error::Underflow` - an underflow caused this call to fail. You might
/// also get an `underflow_callback()`, and you might not get
/// this error code when an underflow occurs. Unlike `Error::Streaming`,
/// the instream is still in a valid state and streaming can continue.
pub fn end_read(&mut self) {
if self.read_started {
unsafe {
match raw::soundio_instream_end_read(self.instream) {
0 => {
self.read_started = false;
}
x => println!("Error ending instream: {}", Error::from(x)),
}
}
}
}
/// Get the minimum frame count that you can call `begin_read()` with.
/// Retrieve this value before calling `begin_read()` to ensure you read the correct number
/// of frames.
pub fn frame_count_min(&self) -> usize {
self.frame_count_min
}
/// Get the maximum frame count that you can call `begin_read()` with.
/// Retrieve this value before calling `begin_read()` to ensure you read the correct number
/// of frames.
pub fn frame_count_max(&self) -> usize {
self.frame_count_max
}
/// Get the actual frame count that `begin_read()` returned. Panics if you haven't called
/// `begin_read()` yet.
pub fn frame_count(&self) -> usize |
/// Get latency in seconds due to software only, not including hardware.
pub fn software_latency(&self) -> f64 {
unsafe { (*self.instream).software_latency as _ }
}
/// Return the number of channels in this stream. Guaranteed to be at least 1.
pub fn channel_count(&self) -> usize {
unsafe { (*self.instream).layout.channel_count as _ }
}
/// Get the sample rate in Hertz.
pub fn sample_rate(&self) -> i32 {
unsafe { (*self.instream).sample_rate as _ }
}
/// Obtain the number of seconds that the next frame of sound being
/// captured will take to arrive in the buffer, plus the amount of time that is
/// represented in the buffer. This includes both software and hardware latency.
///
/// # Errors
///
/// * `Error::Streaming`
///
pub fn get_latency(&mut self) -> Result<f64> {
let mut x: c_double = 0.0;
match unsafe { raw::soundio_instream_get_latency(self.instream, &mut x as *mut c_double) } {
0 => Ok(x),
e => Err(e.into()),
}
}
/// Get the value of a sample. This panics if the `channel` or `frame` are
/// out of range or if you haven't called `begin_read()` yet.
///
/// If you request a different type from the actual one it will be converted.
///
/// # Examples
///
/// ```
/// fn read_callback(stream: &mut soundio::InStreamReader) {
/// let frame_count_max = stream.frame_count_max();
/// stream.begin_read(frame_count_max).unwrap();
/// for c in 0..stream.channel_count() {
/// for f in 0..stream.frame_count() {
/// do_something_with(stream.sample::<i16>(c, f));
/// }
/// }
/// }
/// # fn do_something_with(_: i16) { }
/// ```
pub fn sample<T: Sample>(&self, channel: usize, frame: usize) -> T {
assert!(self.read_started);
assert!(channel < self.channel_count(), "Channel out of range");
assert!(frame < self.frame_count(), "Frame out of range");
unsafe {
let ptr = self.channel_areas[channel]
.ptr
.add(frame * self.channel_areas[channel].step as usize)
as *mut u8;
match (*self.instream).format {
raw::SoundIoFormat::SoundIoFormatS8 => T::from_i8(i8::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU8 => T::from_u8(u8::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS16LE => T::from_i16(i16::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS16BE => T::from_i16(i16::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU16LE => T::from_u16(u16::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU16BE => T::from_u16(u16::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatS24LE => T::from_i24(i24::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS24BE => T::from_i24(i24::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU24LE => T::from_u24(u24::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU24BE => T::from_u24(u24::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatS32LE => T::from_i32(i32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS32BE => T::from_i32(i32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU32LE => T::from_u32(u32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU32BE => T::from_u32(u32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat32LE => T::from_f32(f32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat32BE => T::from_f32(f32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat64LE => T::from_f64(f64::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat64BE => T::from_f64(f64::from_raw_be(ptr)),
_ => panic!("Unknown format"),
}
}
}
// TODO: To achieve speed *and* safety I can use iterators. That will be in a future API.
}
impl<'a> Drop for InStreamReader<'a> {
/// This will drop all of the frames from when you called `begin_read()`.
///
/// Errors are currently just printed to the console and ignored.
///
/// # Errors
///
/// * `Error::Streaming`
fn drop(&mut self) {
if self.read_started {
unsafe {
match raw::soundio_instream_end_read(self.instream) {
0 => {}
x => println!("Error ending instream: {}", Error::from(x)),
}
}
}
}
}
| {
assert!(self.read_started);
self.frame_count
} | identifier_body |
instream.rs | extern crate libsoundio_sys as raw;
use super::error::*;
use super::format::*;
use super::sample::*;
use super::util::*;
use std::marker::PhantomData;
use std::os::raw::{c_double, c_int};
use std::ptr;
use std::slice;
/// This is called when an instream has data available to be read. The `InStreamUserData` struct is obtained
/// from the stream.userdata, then the user-supplied callback is called with an `InStreamReader`
/// object.
pub extern "C" fn instream_read_callback(
stream: *mut raw::SoundIoInStream,
frame_count_min: c_int,
frame_count_max: c_int,
) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
let mut stream_reader = InStreamReader {
instream: userdata.instream,
frame_count_min: frame_count_min as _,
frame_count_max: frame_count_max as _,
read_started: false,
channel_areas: Vec::new(),
frame_count: 0,
phantom: PhantomData,
};
(userdata.read_callback)(&mut stream_reader);
}
pub extern "C" fn instream_overflow_callback(stream: *mut raw::SoundIoInStream) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
if let Some(ref mut cb) = userdata.overflow_callback {
cb();
} else {
println!("Overflow!");
}
}
pub extern "C" fn instream_error_callback(stream: *mut raw::SoundIoInStream, err: c_int) {
// Use stream.userdata to get a reference to the InStreamUserData object.
let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData };
let userdata = unsafe { &mut (*raw_userdata_pointer) };
if let Some(ref mut cb) = userdata.error_callback {
cb(err.into());
} else {
println!("Error: {}", Error::from(err));
}
}
/// InStream represents an input stream for recording.
///
/// It is obtained from `Device` using `Device::open_instream()` and
/// can be started and paused.
pub struct InStream<'a> {
pub userdata: Box<InStreamUserData<'a>>,
// This is just here to say that InStream cannot outlive the Device it was created from.
pub phantom: PhantomData<&'a ()>,
}
/// The callbacks required for an instream are stored in this object. We also store a pointer
/// to the raw instream so that it can be passed to `InStreamReader` in the read callback.
pub struct InStreamUserData<'a> {
pub instream: *mut raw::SoundIoInStream,
pub read_callback: Box<dyn FnMut(&mut InStreamReader) + 'a>,
pub overflow_callback: Option<Box<dyn FnMut() + 'a>>,
pub error_callback: Option<Box<dyn FnMut(Error) + 'a>>,
}
impl<'a> Drop for InStreamUserData<'a> {
fn drop(&mut self) {
unsafe {
raw::soundio_instream_destroy(self.instream);
}
}
}
impl<'a> InStream<'a> {
/// Starts the stream, returning `Ok(())` if it started successfully. Once
/// started the read callback will be periodically called according to the
/// requested latency.
///
/// `start()` should only ever be called once on an `InStream`.
/// Do not use `start()` to resume a stream after pausing it. Instead call `pause(false)`.
///
/// # Errors
///
/// * `Error::BackendDisconnected`
/// * `Error::Streaming`
/// * `Error::OpeningDevice`
/// * `Error::SystemResources`
///
pub fn | (&mut self) -> Result<()> {
match unsafe { raw::soundio_instream_start(self.userdata.instream) } {
0 => Ok(()),
x => Err(x.into()),
}
}
// TODO: Can pause() be called from the read callback?
/// If the underlying backend and device support pausing, this pauses the
/// stream. The `read_callback()` may be called a few more times if
/// the buffer is not empty.
///
/// Pausing might put the hardware into a low power state which is ideal if your
/// software is silent for some time.
///
/// This should not be called before `start()`. Pausing when already paused or
/// unpausing when already unpaused has no effect and returns `Ok(())`.
///
/// # Errors
///
/// * `Error::BackendDisconnected`
/// * `Error::Streaming`
/// * `Error::IncompatibleDevice` - device does not support pausing/unpausing
///
pub fn pause(&mut self, pause: bool) -> Result<()> {
match unsafe { raw::soundio_instream_pause(self.userdata.instream, pause as i8) } {
0 => Ok(()),
e => Err(e.into()),
}
}
/// Returns the stream format.
pub fn format(&self) -> Format {
unsafe { (*self.userdata.instream).format.into() }
}
/// Sample rate is the number of frames per second.
pub fn sample_rate(&self) -> i32 {
unsafe { (*self.userdata.instream).sample_rate as _ }
}
/// Ignoring hardware latency, this is the number of seconds it takes for a
/// captured sample to become available for reading.
/// After you call `Device::open_instream()`, this value is replaced with the
/// actual software latency, as near to this value as possible.
///
/// A higher value means less CPU usage. Defaults to a large value,
/// potentially upwards of 2 seconds.
///
/// If the device has unknown software latency min and max values, you may
/// still set this (in `Device::open_instream()`), but you might not
/// get the value you requested.
///
/// For PulseAudio, if you set this value to non-default, it sets
/// `PA_STREAM_ADJUST_LATENCY` and is the value used for `fragsize`.
/// For JACK, this value is always equal to
/// `Device::software_latency().current`.
pub fn software_latency(&self) -> f64 {
unsafe { (*self.userdata.instream).software_latency as _ }
}
/// The name of the stream, which defaults to "SoundIoInStream".
///
/// PulseAudio uses this for the stream name.
/// JACK uses this for the client name of the client that connects when you
/// open the stream.
/// WASAPI uses this for the session display name.
/// Must not contain a colon (":").
///
/// TODO: Currently there is no way to set this.
pub fn name(&self) -> String {
unsafe { utf8_to_string((*self.userdata.instream).name) }
}
/// The number of bytes per frame, equal to the number of bytes
/// per sample, multiplied by the number of channels.
pub fn bytes_per_frame(&self) -> i32 {
unsafe { (*self.userdata.instream).bytes_per_frame as _ }
}
/// The number of bytes in a sample, e.g. 3 for `i24`.
pub fn bytes_per_sample(&self) -> i32 {
unsafe { (*self.userdata.instream).bytes_per_sample as _ }
}
}
/// `InStreamReader` is passed to the read callback and can be used to read from the stream.
///
/// You start by calling `begin_read()` and then you can read the samples. When the `InStreamReader`
/// is dropped the samples are dropped. An error at that point is written to the console and ignored.
///
pub struct InStreamReader<'a> {
instream: *mut raw::SoundIoInStream,
frame_count_min: usize,
frame_count_max: usize,
read_started: bool,
// The memory areas to read from - one for each channel. Populated after begin_read()
channel_areas: Vec<raw::SoundIoChannelArea>,
// The actual frame count. Populated after begin_read()
frame_count: usize,
// This cannot outlive the scope that it is spawned from (in the read callback).
phantom: PhantomData<&'a ()>,
}
impl<'a> InStreamReader<'a> {
/// Start a read. You can only call this once per callback otherwise it panics.
///
/// frame_count is the number of frames you want to read. It must be between
/// frame_count_min and frame_count_max inclusive, or `begin_read()` will panic.
///
/// It returns the number of frames you can actually read. The returned value
/// will always be less than or equal to the provided value.
///
/// # Errors
///
/// * `Error::Invalid`
/// * `frame_count` < `frame_count_min` or `frame_count` > `frame_count_max`
/// * `Error::Streaming`
/// * `Error::IncompatibleDevice` - in rare cases it might just now
/// be discovered that the device uses non-byte-aligned access, in which
/// case this error code is returned.
///
pub fn begin_read(&mut self, frame_count: usize) -> Result<usize> {
assert!(
frame_count >= self.frame_count_min && frame_count <= self.frame_count_max,
"frame_count out of range"
);
let mut areas: *mut raw::SoundIoChannelArea = ptr::null_mut();
let mut actual_frame_count: c_int = frame_count as _;
match unsafe {
raw::soundio_instream_begin_read(
self.instream,
&mut areas as *mut _,
&mut actual_frame_count as *mut _,
)
} {
0 => {
self.read_started = true;
self.frame_count = actual_frame_count as _;
// Return now if there are no frames to actually read.
if actual_frame_count <= 0 {
return Ok(0);
}
let cc = self.channel_count();
self.channel_areas = vec![
raw::SoundIoChannelArea {
ptr: ptr::null_mut(),
step: 0
};
cc
];
unsafe {
self.channel_areas.copy_from_slice(slice::from_raw_parts::<
raw::SoundIoChannelArea,
>(areas, cc));
}
Ok(actual_frame_count as _)
}
e => Err(e.into()),
}
}
/// Commits the read that you began with `begin_read()`.
///
/// Errors are currently just printed to the console and ignored.
///
/// # Errors
///
/// * `Error::Streaming`
/// * `Error::Underflow` - an underflow caused this call to fail. You might
/// also get an `underflow_callback()`, and you might not get
/// this error code when an underflow occurs. Unlike `Error::Streaming`,
/// the instream is still in a valid state and streaming can continue.
pub fn end_read(&mut self) {
if self.read_started {
unsafe {
match raw::soundio_instream_end_read(self.instream) {
0 => {
self.read_started = false;
}
x => println!("Error ending instream: {}", Error::from(x)),
}
}
}
}
/// Get the minimum frame count that you can call `begin_read()` with.
/// Retrieve this value before calling `begin_read()` to ensure you read the correct number
/// of frames.
pub fn frame_count_min(&self) -> usize {
self.frame_count_min
}
/// Get the maximum frame count that you can call `begin_read()` with.
/// Retrieve this value before calling `begin_read()` to ensure you read the correct number
/// of frames.
pub fn frame_count_max(&self) -> usize {
self.frame_count_max
}
/// Get the actual frame count that `begin_read()` returned. Panics if you haven't called
/// `begin_read()` yet.
pub fn frame_count(&self) -> usize {
assert!(self.read_started);
self.frame_count
}
/// Get latency in seconds due to software only, not including hardware.
pub fn software_latency(&self) -> f64 {
unsafe { (*self.instream).software_latency as _ }
}
/// Return the number of channels in this stream. Guaranteed to be at least 1.
pub fn channel_count(&self) -> usize {
unsafe { (*self.instream).layout.channel_count as _ }
}
/// Get the sample rate in Hertz.
pub fn sample_rate(&self) -> i32 {
unsafe { (*self.instream).sample_rate as _ }
}
/// Obtain the number of seconds that the next frame of sound being
/// captured will take to arrive in the buffer, plus the amount of time that is
/// represented in the buffer. This includes both software and hardware latency.
///
/// # Errors
///
/// * `Error::Streaming`
///
pub fn get_latency(&mut self) -> Result<f64> {
let mut x: c_double = 0.0;
match unsafe { raw::soundio_instream_get_latency(self.instream, &mut x as *mut c_double) } {
0 => Ok(x),
e => Err(e.into()),
}
}
/// Get the value of a sample. This panics if the `channel` or `frame` are
/// out of range or if you haven't called `begin_read()` yet.
///
/// If you request a different type from the actual one it will be converted.
///
/// # Examples
///
/// ```
/// fn read_callback(stream: &mut soundio::InStreamReader) {
/// let frame_count_max = stream.frame_count_max();
/// stream.begin_read(frame_count_max).unwrap();
/// for c in 0..stream.channel_count() {
/// for f in 0..stream.frame_count() {
/// do_something_with(stream.sample::<i16>(c, f));
/// }
/// }
/// }
/// # fn do_something_with(_: i16) { }
/// ```
pub fn sample<T: Sample>(&self, channel: usize, frame: usize) -> T {
assert!(self.read_started);
assert!(channel < self.channel_count(), "Channel out of range");
assert!(frame < self.frame_count(), "Frame out of range");
unsafe {
let ptr = self.channel_areas[channel]
.ptr
.add(frame * self.channel_areas[channel].step as usize)
as *mut u8;
match (*self.instream).format {
raw::SoundIoFormat::SoundIoFormatS8 => T::from_i8(i8::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU8 => T::from_u8(u8::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS16LE => T::from_i16(i16::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS16BE => T::from_i16(i16::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU16LE => T::from_u16(u16::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU16BE => T::from_u16(u16::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatS24LE => T::from_i24(i24::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS24BE => T::from_i24(i24::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU24LE => T::from_u24(u24::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU24BE => T::from_u24(u24::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatS32LE => T::from_i32(i32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatS32BE => T::from_i32(i32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatU32LE => T::from_u32(u32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatU32BE => T::from_u32(u32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat32LE => T::from_f32(f32::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat32BE => T::from_f32(f32::from_raw_be(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat64LE => T::from_f64(f64::from_raw_le(ptr)),
raw::SoundIoFormat::SoundIoFormatFloat64BE => T::from_f64(f64::from_raw_be(ptr)),
_ => panic!("Unknown format"),
}
}
}
// TODO: To achieve speed *and* safety I can use iterators. That will be in a future API.
}
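// Editorial sketch, not part of the original crate: gather one callback's
// worth of audio as interleaved `i16` PCM (the doctest for `sample()` above
// shows `i16` is a valid target type), converting from whatever format the
// device delivers.
fn example_collect_interleaved(stream: &mut InStreamReader) -> Vec<i16> {
    let frames = stream.begin_read(stream.frame_count_max()).unwrap();
    let channels = stream.channel_count();
    let mut out = Vec::with_capacity(frames * channels);
    for f in 0..frames {
        for c in 0..channels {
            // sample() converts from the device format (see the match above).
            out.push(stream.sample::<i16>(c, f));
        }
    }
    out // the caller's InStreamReader ends the read when dropped
}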
impl<'a> Drop for InStreamReader<'a> {
/// This will drop all of the frames from when you called `begin_read()`.
///
/// Errors are currently just printed to the console and ignored.
///
/// # Errors
///
/// * `Error::Streaming`
fn drop(&mut self) {
if self.read_started {
unsafe {
match raw::soundio_instream_end_read(self.instream) {
0 => {}
x => println!("Error ending instream: {}", Error::from(x)),
}
}
}
}
}
| start | identifier_name |
__init__.py | # -*- coding: utf-8 -*-
# Copyright 2020, CS GROUP - France, http://www.c-s.fr
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utilities to be used throughout eodag.
Everything that does not fit into one of the specialised categories of utilities in
this package should go here
"""
from __future__ import unicode_literals
import copy
import errno
import os
import re
import string
import sys
import types
import unicodedata
from datetime import datetime
from itertools import repeat, starmap
import click
import six
from rasterio.crs import CRS
from requests.auth import AuthBase
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from unidecode import unidecode
# All modules using these should import them from utils package
try: # PY3
from urllib.parse import urljoin, urlparse, parse_qs, urlunparse # noqa
except ImportError: # PY2
from urlparse import urljoin, urlparse, parse_qs, urlunparse # noqa
try: # PY3
from urllib.parse import quote, quote_plus # noqa
if sys.version_info.minor < 5:
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query,
doseq=False,
safe="",
encoding=None,
errors=None,
quote_via=quote_plus,
): # noqa
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, str):
v = quote_via(v, safe, encoding, errors)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe, encoding, errors)
l.append(k + "=" + elt)
return "&".join(l)
else:
from urllib.parse import urlencode
except ImportError: # PY2
from urllib import quote, quote_plus # noqa
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query, doseq=False, safe="", encoding=None, errors=None, quote_via=quote_plus
):
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, six.string_types):
v = quote_via(v, safe)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe)
l.append(k + "=" + elt)
return "&".join(l)
class RequestsTokenAuth(AuthBase):
"""A custom authentication class to be used with requests module"""
def __init__(self, token, where, qs_key=None):
self.token = token
self.where = where
self.qs_key = qs_key
def __call__(self, request):
"""Perform the actual authentication"""
if self.where == "qs":
parts = urlparse(request.url)
qs = parse_qs(parts.query)
qs[self.qs_key] = self.token
request.url = urlunparse(
(
parts.scheme,
parts.netloc,
parts.path,
parts.params,
urlencode(qs),
parts.fragment,
)
)
elif self.where == "header":
request.headers["Authorization"] = "Bearer {}".format(self.token)
return request
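def _example_token_auth_usage():
    """Editorial sketch, not part of eodag: shows how ``RequestsTokenAuth``
    plugs into the ``requests`` module. The URL and token are placeholders."""
    import requests

    # Send the token as a "Bearer" Authorization header...
    header_auth = RequestsTokenAuth("my-secret-token", where="header")
    requests.get("https://example.com/api/items", auth=header_auth)
    # ...or as a query-string parameter named "token".
    qs_auth = RequestsTokenAuth("my-secret-token", where="qs", qs_key="token")
    requests.get("https://example.com/api/items", auth=qs_auth)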
class FloatRange(click.types.FloatParamType):
"""A parameter that works similar to :data:`click.FLOAT` but restricts the
value to fit into a range. Fails if the value doesn't fit into the range.
"""
name = "percentage"
def __init__(self, min=None, max=None):
self.min = min
self.max = max
def convert(self, value, param, ctx):
"""Convert value"""
rv = click.types.FloatParamType.convert(self, value, param, ctx)
if (
self.min is not None
and rv < self.min
or self.max is not None
and rv > self.max
):
if self.min is None:
self.fail(
"%s is bigger than the maximum valid value " "%s." % (rv, self.max),
param,
ctx,
)
elif self.max is None:
self.fail(
"%s is smaller than the minimum valid value "
"%s." % (rv, self.min),
param,
ctx,
)
else:
self.fail(
"%s is not in the valid range of %s to %s."
% (rv, self.min, self.max),
param,
ctx,
)
return rv
def __repr__(self):
return "FloatRange(%r, %r)" % (self.min, self.max)
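def _example_float_range_option():
    """Editorial sketch, not part of eodag: ``FloatRange`` used as a click
    option type, e.g. for a percentage bounded to [0, 100]."""
    @click.command()
    @click.option("--cloud-cover", type=FloatRange(min=0.0, max=100.0))
    def search(cloud_cover):
        click.echo("maximum cloud cover: {}%".format(cloud_cover))

    return search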
def slugify(value, allow_unicode=False):
"""Copied from Django Source code, only modifying last line (no need for safe
strings).
source: https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
try: # PY2
value = unicode(value)
except NameError: # PY3
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
value = re.sub(r"[^\w\s-]", "", value).strip().lower()
return re.sub(r"[-\s]+", "-", value)
def utf8_everywhere(mapping):
"""Recursively transforms all string found in the dict mapping to UTF-8 if we are
on Python 2"""
mutate_dict_in_place(
(
lambda value: value.decode("utf-8")
if isinstance(value, str)
and sys.version_info.major == 2
and sys.version_info.minor == 7
else value
),
mapping,
)
def sanitize(value):
"""Sanitize string to be used as a name of a directory.
>>> sanitize('productName') | >>> sanitize('name with multiple spaces')
'name_with_multiple_spaces'
>>> sanitize('âtre fête île alcôve bûche çà génèse où Noël ovoïde capharnaüm')
'atre_fete_ile_alcove_buche_ca_genese_ou_Noel_ovoide_capharnaum'
>>> sanitize('replace,punctuation:;signs!?byunderscorekeeping-hyphen.dot_and_underscore') # noqa
'replace_punctuation_signs_byunderscorekeeping-hyphen.dot_and_underscore'
"""
# remove accents
rv = unidecode(value)
# replace punctuation signs and spaces by underscore
# keep hyphen, dot and underscore from punctuation
tobereplaced = re.sub(r"[-_.]", "", string.punctuation)
# add spaces to be removed
tobereplaced += r"\s"
rv = re.sub(r"[" + tobereplaced + r"]+", "_", rv)
return str(rv)
def mutate_dict_in_place(func, mapping):
"""Apply func to values of mapping.
The mapping object's values are modified in-place. The function is recursive,
allowing to also modify values of nested dicts that may be level-1 values of
mapping.
:param func: A function to apply to each value of mapping which is not a dict object
:type func: func
:param mapping: A Python dict object
:type mapping: dict
:returns: None
"""
for key, value in mapping.items():
if isinstance(value, dict):
mutate_dict_in_place(func, value)
else:
mapping[key] = func(value)
def merge_mappings(mapping1, mapping2):
"""Merge two mappings with string keys, values from `mapping2` overriding values
from `mapping1`.
Does its best to detect the key in `mapping1` to override. For example, let's say
we have::
mapping2 = {"keya": "new"}
mapping1 = {"keyA": "obsolete"}
Then::
merge_mappings(mapping1, mapping2) ==> {"keyA": "new"}
If mapping2 has a key that cannot be detected in mapping1, this new key is added
to mapping1 as is.
:param dict mapping1: The mapping containing values to be overridden
:param dict mapping2: The mapping containing values that will override the
first mapping
"""
# A mapping between mapping1 keys as lowercase strings and original mapping1 keys
m1_keys_lowercase = {key.lower(): key for key in mapping1}
for key, value in mapping2.items():
if isinstance(value, dict):
try:
merge_mappings(mapping1[key], value)
except KeyError:
# If the key from mapping2 is not in mapping1, it is either because key
# is the lowercased form of the corresponding key in mapping1 or because
# key is a new key to be added to mapping1
current_value = mapping1.setdefault(m1_keys_lowercase.get(key, key), {})
if not current_value:
current_value.update(value)
else:
merge_mappings(current_value, value)
else:
# Even for "scalar" values (a.k.a not nested structures), first check if
# the key from mapping1 is not the lowercase version of a key in mapping2.
# Otherwise, create the key in mapping1. This is the meaning of
# m1_keys_lowercase.get(key, key)
current_value = mapping1.get(m1_keys_lowercase.get(key, key), None)
if current_value is not None:
current_value_type = type(current_value)
if isinstance(value, six.string_types):
# Bool is a type with special meaning in Python, thus the special
# case
if current_value_type is bool:
if value.capitalize() not in ("True", "False"):
raise ValueError(
"Only true or false strings (case insensitive) are "
"allowed for booleans"
)
# Get the real Python value of the boolean. e.g: value='tRuE'
# => eval(value.capitalize())=True.
# str.capitalize() transforms the first character of the string
# to a capital letter
mapping1[m1_keys_lowercase[key]] = eval(value.capitalize())
else:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
else:
try:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
except TypeError:
# Ignore any override value that does not have the same type
# as the default value
pass
else:
mapping1[key] = value
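def _example_merge_mappings():
    """Editorial sketch, not part of eodag: demonstrates the case-insensitive
    key detection and the type coercion documented above."""
    defaults = {"keyA": "obsolete", "timeout": 30, "nested": {"Flag": False}}
    overrides = {"keya": "new", "timeout": "60", "nested": {"flag": "true"}}
    merge_mappings(defaults, overrides)
    # "keya" overrode "keyA", "60" was coerced to int, "true" to bool
    assert defaults == {"keyA": "new", "timeout": 60, "nested": {"Flag": True}}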
def maybe_generator(obj):
"""Generator function that gets an arbitrary object and generates values from it
if the object is a generator, otherwise yields the object itself."""
if isinstance(obj, types.GeneratorType):
for elt in obj:
yield elt
else:
yield obj
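def _example_maybe_generator():
    """Editorial sketch, not part of eodag: plain values and generators are
    consumed the same way."""
    assert list(maybe_generator(42)) == [42]
    assert list(maybe_generator(n for n in range(3))) == [0, 1, 2]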
DEFAULT_PROJ = CRS.from_epsg(4326)
def get_timestamp(date_time, date_format="%Y-%m-%dT%H:%M:%S"):
"""Returns the given date_time string formatted with date_format as timestamp,
in a PY2/3 compatible way
:param date_time: the datetime string to return as timestamp
:type date_time: str or unicode
:param date_format: (optional) the date format in which date_time is given,
defaults to '%Y-%m-%dT%H:%M:%S'
:type date_format: str or unicode
:returns: the timestamp corresponding to the date_time string in seconds
:rtype: float
"""
date_time = datetime.strptime(date_time, date_format)
try:
return date_time.timestamp()
# There is no timestamp method on datetime objects in Python 2
except AttributeError:
import time
return time.mktime(date_time.timetuple()) + date_time.microsecond / 1e6
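def _example_get_timestamp():
    """Editorial sketch, not part of eodag: the absolute value depends on the
    local timezone, so we only check that the conversion round-trips."""
    ts = get_timestamp("2021-06-01T12:00:00")
    assert datetime.fromtimestamp(ts) == datetime(2021, 6, 1, 12, 0)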
class ProgressCallback(object):
"""A callable used to render progress to users for long running processes"""
def __init__(self, max_size=None):
self.pb = None
self.max_size = max_size
def __call__(self, current_size, max_size=None):
"""Update the progress bar.
:param current_size: amount of data already processed
:type current_size: int
:param max_size: maximum amount of data to be processed
:type max_size: int
"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
class NotebookProgressCallback(ProgressCallback):
"""A custom progress bar to be used inside Jupyter notebooks"""
def __call__(self, current_size, max_size=None):
"""Update the progress bar"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm_notebook(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
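def _example_progress_callback():
    """Editorial sketch, not part of eodag: feeding a ``ProgressCallback``
    with the chunk sizes of a simulated download."""
    callback = ProgressCallback()
    total = 1024 * 1024
    for _ in range(4):
        callback(total // 4, max_size=total)  # each call advances the bar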
def repeatfunc(func, n, *args):
"""Call `func` `n` times with `args`"""
return starmap(func, repeat(args, n))
def makedirs(dirpath):
"""Create a directory in filesystem with parents if necessary"""
try:
os.makedirs(dirpath)
except OSError as err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(dirpath):
raise
def update_nested_dict(old_dict, new_dict, extend_list_values=False):
"""Recursively update old_dict items with new_dict ones
:param old_dict: dict to be updated
:type old_dict: dict
:param new_dict: incoming dict
:type new_dict: dict
:param extend_list_values: extend old_dict value if both old/new values are lists
:type extend_list_values: bool
:returns: updated dict
:rtype: dict
"""
for k, v in new_dict.items():
if k in old_dict.keys():
if isinstance(v, dict) and isinstance(old_dict[k], dict):
old_dict[k] = update_nested_dict(
old_dict[k], v, extend_list_values=extend_list_values
)
elif (
extend_list_values
and isinstance(old_dict[k], list)
and isinstance(v, list)
):
old_dict[k].extend(v)
elif v:
old_dict[k] = v
else:
old_dict[k] = v
return old_dict
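def _example_update_nested_dict():
    """Editorial sketch, not part of eodag: recursive update with list
    extension enabled."""
    old = {"a": {"x": 1}, "tags": ["s2"]}
    new = {"a": {"y": 2}, "tags": ["l8"]}
    updated = update_nested_dict(old, new, extend_list_values=True)
    assert updated == {"a": {"x": 1, "y": 2}, "tags": ["s2", "l8"]}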
def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):
"""Recursively apply a method to dict elements
:param config_dict: input nested dictionary
:type config_dict: dict
:param apply_method: method to be applied to dict elements
:type apply_method: :func:`apply_method`
:param apply_method_parameters: optional parameters passed to the method
:type apply_method_parameters: dict
:returns: updated dict
:rtype: dict
"""
jsonpath_dict = copy.deepcopy(config_dict)
for dict_k, dict_v in jsonpath_dict.items():
if isinstance(dict_v, dict):
jsonpath_dict[dict_k] = dict_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
elif any(isinstance(dict_v, t) for t in (list, tuple)):
for list_idx, list_v in enumerate(dict_v):
if isinstance(list_v, dict):
jsonpath_dict[dict_k][list_idx] = dict_items_recursive_apply(
list_v, apply_method, **apply_method_parameters
)
else:
jsonpath_dict[dict_k][list_idx] = apply_method(
dict_k, list_v, **apply_method_parameters
)
else:
jsonpath_dict[dict_k] = apply_method(
dict_k, dict_v, **apply_method_parameters
)
return jsonpath_dict | 'productName' | random_line_split |
__init__.py | # -*- coding: utf-8 -*-
# Copyright 2020, CS GROUP - France, http://www.c-s.fr
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utilities to be used throughout eodag.
Everything that does not fit into one of the specialised categories of utilities in
this package should go here
"""
from __future__ import unicode_literals
import copy
import errno
import os
import re
import string
import sys
import types
import unicodedata
from datetime import datetime
from itertools import repeat, starmap
import click
import six
from rasterio.crs import CRS
from requests.auth import AuthBase
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from unidecode import unidecode
# All modules using these should import them from utils package
try: # PY3
from urllib.parse import urljoin, urlparse, parse_qs, urlunparse # noqa
except ImportError: # PY2
from urlparse import urljoin, urlparse, parse_qs, urlunparse # noqa
try: # PY3
from urllib.parse import quote, quote_plus # noqa
if sys.version_info.minor < 5:
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query,
doseq=False,
safe="",
encoding=None,
errors=None,
quote_via=quote_plus,
): # noqa
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, str):
v = quote_via(v, safe, encoding, errors)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe, encoding, errors)
l.append(k + "=" + elt)
return "&".join(l)
else:
from urllib.parse import urlencode
except ImportError: # PY2
from urllib import quote, quote_plus # noqa
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query, doseq=False, safe="", encoding=None, errors=None, quote_via=quote_plus
):
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, six.string_types):
v = quote_via(v, safe)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe)
l.append(k + "=" + elt)
return "&".join(l)
class RequestsTokenAuth(AuthBase):
"""A custom authentication class to be used with requests module"""
def __init__(self, token, where, qs_key=None):
self.token = token
self.where = where
self.qs_key = qs_key
def __call__(self, request):
"""Perform the actual authentication"""
if self.where == "qs":
parts = urlparse(request.url)
qs = parse_qs(parts.query)
qs[self.qs_key] = self.token
request.url = urlunparse(
(
parts.scheme,
parts.netloc,
parts.path,
parts.params,
urlencode(qs),
parts.fragment,
)
)
elif self.where == "header":
request.headers["Authorization"] = "Bearer {}".format(self.token)
return request
class FloatRange(click.types.FloatParamType):
"""A parameter that works similar to :data:`click.FLOAT` but restricts the
value to fit into a range. Fails if the value doesn't fit into the range.
"""
name = "percentage"
def __init__(self, min=None, max=None):
self.min = min
self.max = max
def convert(self, value, param, ctx):
"""Convert value"""
rv = click.types.FloatParamType.convert(self, value, param, ctx)
if (
self.min is not None
and rv < self.min
or self.max is not None
and rv > self.max
):
if self.min is None:
self.fail(
"%s is bigger than the maximum valid value " "%s." % (rv, self.max),
param,
ctx,
)
elif self.max is None:
self.fail(
"%s is smaller than the minimum valid value "
"%s." % (rv, self.min),
param,
ctx,
)
else:
self.fail(
"%s is not in the valid range of %s to %s."
% (rv, self.min, self.max),
param,
ctx,
)
return rv
def __repr__(self):
return "FloatRange(%r, %r)" % (self.min, self.max)
def slugify(value, allow_unicode=False):
"""Copied from Django Source code, only modifying last line (no need for safe
strings).
source: https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
try: # PY2
value = unicode(value)
except NameError: # PY3
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
value = re.sub(r"[^\w\s-]", "", value).strip().lower()
return re.sub(r"[-\s]+", "-", value)
def utf8_everywhere(mapping):
"""Recursively transforms all string found in the dict mapping to UTF-8 if we are
on Python 2"""
mutate_dict_in_place(
(
lambda value: value.decode("utf-8")
if isinstance(value, str)
and sys.version_info.major == 2
and sys.version_info.minor == 7
else value
),
mapping,
)
def sanitize(value):
"""Sanitize string to be used as a name of a directory.
>>> sanitize('productName')
'productName'
>>> sanitize('name with multiple spaces')
'name_with_multiple_spaces'
>>> sanitize('âtre fête île alcôve bûche çà génèse où Noël ovoïde capharnaüm')
'atre_fete_ile_alcove_buche_ca_genese_ou_Noel_ovoide_capharnaum'
>>> sanitize('replace,ponctuation:;signs!?byunderscorekeeping-hyphen.dot_and_underscore') # noqa
'replace_ponctuation_signs_byunderscorekeeping-hyphen.dot_and_underscore'
"""
# remove accents
rv = unidecode(value)
# replace punctuation signs and spaces by underscore
# keep hyphen, dot and underscore from punctuation
tobereplaced = re.sub(r"[-_.]", "", string.punctuation)
# add spaces to be removed
tobereplaced += r"\s"
rv = re.sub(r"[" + tobereplaced + r"]+", "_", rv)
return str(rv)
def mutate_dict_in_place(func, mapping):
"""Apply func to values of mapping.
The mapping object's values are modified in-place. The function is recursive,
    allowing it to also modify values of nested dicts that may be level-1 values of
mapping.
:param func: A function to apply to each value of mapping which is not a dict object
:type func: func
:param mapping: A Python dict object
:type mapping: dict
:returns: None
"""
for key, value in mapping.items():
if isinstance(value, dict):
mutate_dict_in_place(func, value)
else:
mapping[key] = func(value)
def merge_mappings(mapping1, mapping2):
"""Merge two mappings with string keys, values from `mapping2` overriding values
from `mapping1`.
    Does its best to detect the key in `mapping1` to override. For example, let's say
we have::
mapping2 = {"keya": "new"}
mapping1 = {"keyA": "obsolete"}
Then::
merge_mappings(mapping1, mapping2) ==> {"keyA": "new"}
If mapping2 has a key that cannot be detected in mapping1, this new key is added
to mapping1 as is.
:param dict mapping1: The mapping containing values to be overridden
:param dict mapping2: The mapping containing values that will override the
first mapping
"""
# A mapping between mapping1 keys as lowercase strings and original mapping1 keys
m1_keys_lowercase = {key.lower(): key for key in mapping1}
for key, value in mapping2.items():
if isinstance(value, dict):
try:
merge_mappings(mapping1[key], value)
except KeyError:
                # If the key from mapping2 is not in mapping1, it is either
                # because key is the lowercased form of the corresponding key in
                # mapping1 or because key is a new key to be added to mapping1
current_value = mapping1.setdefault(m1_keys_lowercase.get(key, key), {})
if not current_value:
current_value.update(value)
else:
merge_mappings(current_value, value)
else:
# Even for "scalar" values (a.k.a not nested structures), first check if
# the key from mapping1 is not the lowercase version of a key in mapping2.
# Otherwise, create the key in mapping1. This is the meaning of
# m1_keys_lowercase.get(key, key)
current_value = mapping1.get(m1_keys_lowercase.get(key, key), None)
if current_value is not None:
current_value_type = type(current_value)
if isinstance(value, six.string_types):
# Bool is a type with special meaning in Python, thus the special
# case
if current_value_type is bool:
if value.capitalize() not in ("True", "False"):
raise ValueError(
"Only true or false strings (case insensitive) are "
"allowed for booleans"
)
                        # Get the real Python value of the boolean, e.g. value='tRuE'
                        # capitalizes to 'True'. Comparing against 'True' gives the
                        # boolean without calling eval() on a user-supplied string
                        mapping1[m1_keys_lowercase[key]] = value.capitalize() == "True"
else:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
else:
try:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
except TypeError:
# Ignore any override value that does not have the same type
# as the default value
pass
else:
mapping1[key] = value
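# Example (added): overriding is case-insensitive and casts the override to the
# default value's type:
#   merge_mappings({"Timeout": 30, "Debug": False}, {"timeout": "45", "debug": "true"})
# leaves the first mapping as {"Timeout": 45, "Debug": True}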
def maybe_generator(obj):
"""Generator function that get an arbitrary object and generate values from it if
the object is a generator."""
if isinstance(obj, types.GeneratorType):
for elt in obj:
yield elt
else:
yield obj
DEFAULT_PROJ = CRS.from_epsg(4326)
def get_timestamp(date_time, date_format="%Y-%m-%dT%H:%M:%S"):
"""Returns the given date_time string formatted with date_format as timestamp,
in a PY2/3 compatible way
:param date_time: the datetime string to return as timestamp
:type date_time: str or unicode
:param date_format: (optional) the date format in which date_time is given,
defaults to '%Y-%m-%dT%H:%M:%S'
:type date_format: str or unicode
:returns: the timestamp corresponding to the date_time string in seconds
:rtype: float
"""
date_time = datetime.strptime(date_time, date_format)
try:
return date_time.timestamp()
# There is no timestamp method on datetime objects in Python 2
except AttributeError:
import time
return time.mktime(date_time.timetuple()) + date_time.microsecond / 1e6
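# Example (added): get_timestamp('2021-01-01T00:00:00') returns a float POSIX
# timestamp; note that datetime.timestamp() interprets naive datetimes in the
# local timezone.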
class ProgressCallback(object):
"""A callable used to render progress to users for long running processes"""
def __init__(self, max_size=None):
self.pb = None
self.max_size = max_size
def __call__(self, current_size, max_size=None):
"""Update the progress bar.
:param current_size: amount of data already processed
:type current_size: int
:param max_size: maximum amount of data to be processed
:type max_size: int
"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
class NotebookProgressCallback(ProgressCallback):
"""A custom progress bar to be used inside Jupyter notebooks"""
def __call__(self, current_size, max_size=None):
"""Update the progress bar"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm_notebook(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
def repeatfunc(func, n, *args):
"""Call `func | rs(dirpath):
"""Create a directory in filesystem with parents if necessary"""
try:
os.makedirs(dirpath)
except OSError as err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(dirpath):
raise
def update_nested_dict(old_dict, new_dict, extend_list_values=False):
"""Update recursively old_dict items with new_dict ones
:param old_dict: dict to be updated
:type old_dict: dict
    :param new_dict: incoming dict
:type new_dict: dict
:param extend_list_values: extend old_dict value if both old/new values are lists
:type extend_list_values: bool
:returns: updated dict
:rtype: dict
"""
for k, v in new_dict.items():
if k in old_dict.keys():
if isinstance(v, dict) and isinstance(old_dict[k], dict):
old_dict[k] = update_nested_dict(
old_dict[k], v, extend_list_values=extend_list_values
)
elif (
extend_list_values
and isinstance(old_dict[k], list)
and isinstance(v, list)
):
old_dict[k].extend(v)
elif v:
old_dict[k] = v
else:
old_dict[k] = v
return old_dict
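# Example (added):
#   update_nested_dict({"a": {"b": 1}, "l": [1]}, {"a": {"c": 2}, "l": [2]},
#                      extend_list_values=True)
# returns {"a": {"b": 1, "c": 2}, "l": [1, 2]}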
def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):
"""Recursive apply method to dict elements
:param config_dict: input nested dictionnary
:type config_dict: dict
:param apply_method: method to be applied to dict elements
:type apply_method: :func:`apply_method`
:param apply_method_parameters: optional parameters passed to the method
:type apply_method_parameters: dict
:returns: updated dict
:rtype: dict
"""
jsonpath_dict = copy.deepcopy(config_dict)
for dict_k, dict_v in jsonpath_dict.items():
if isinstance(dict_v, dict):
jsonpath_dict[dict_k] = dict_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
elif any(isinstance(dict_v, t) for t in (list, tuple)):
for list_idx, list_v in enumerate(dict_v):
if isinstance(list_v, dict):
jsonpath_dict[dict_k][list_idx] = dict_items_recursive_apply(
list_v, apply_method, **apply_method_parameters
)
else:
jsonpath_dict[dict_k][list_idx] = apply_method(
dict_k, list_v, **apply_method_parameters
)
else:
jsonpath_dict[dict_k] = apply_method(
dict_k, dict_v, **apply_method_parameters
)
return jsonpath_dict
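# Example (added): the apply_method receives (key, value) pairs, e.g.
#   dict_items_recursive_apply({"a": {"b": "x"}, "c": ["y"]}, lambda k, v: v.upper())
# returns {"a": {"b": "X"}, "c": ["Y"]}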
| ` `n` times with `args`"""
return starmap(func, repeat(args, n))
def makedi | identifier_body |
__init__.py | # -*- coding: utf-8 -*-
# Copyright 2020, CS GROUP - France, http://www.c-s.fr
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utilities to be used throughout eodag.
Everything that does not fit into one of the specialised categories of utilities in
this package should go here
"""
from __future__ import unicode_literals
import copy
import errno
import os
import re
import string
import sys
import types
import unicodedata
from datetime import datetime
from itertools import repeat, starmap
import click
import six
from rasterio.crs import CRS
from requests.auth import AuthBase
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from unidecode import unidecode
# All modules using these should import them from utils package
try: # PY3
from urllib.parse import urljoin, urlparse, parse_qs, urlunparse # noqa
except ImportError: # PY2
from urlparse import urljoin, urlparse, parse_qs, urlunparse # noqa
try: # PY3
from urllib.parse import quote, quote_plus # noqa
if sys.version_info.minor < 5:
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query,
doseq=False,
safe="",
encoding=None,
errors=None,
quote_via=quote_plus,
): # noqa
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, str):
v = quote_via(v, safe, encoding, errors)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe, encoding, errors)
l.append(k + "=" + elt)
return "&".join(l)
else:
from urllib.parse import urlencode
except ImportError: # PY2
from urllib import quote, quote_plus # noqa
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query, doseq=False, safe="", encoding=None, errors=None, quote_via=quote_plus
):
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, six.string_types):
v = quote_via(v, safe)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe)
l.append(k + "=" + elt)
return "&".join(l)
class RequestsTokenAuth(AuthBase):
"""A custom authentication class to be used with requests module"""
def __init__(self, token, where, qs_key=None):
self.token = token
self.where = where
self.qs_key = qs_key
def __call__(self, request):
"""Perform the actual authentication"""
if self.where == "qs":
parts = urlparse(request.url)
qs = parse_qs(parts.query)
qs[self.qs_key] = self.token
request.url = urlunparse(
(
parts.scheme,
parts.netloc,
parts.path,
parts.params,
urlencode(qs),
parts.fragment,
)
)
elif self.where == "header":
request.headers["Authorization"] = "Bearer {}".format(self.token)
return request
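# Usage sketch (added; the endpoint is hypothetical):
#   requests.get("https://api.example.com/items",
#                auth=RequestsTokenAuth("secret", "header"))
# sends the token as an "Authorization: Bearer secret" header, while
# where="qs" would append it to the query string under qs_key instead.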
class FloatRange(click.types.FloatParamType):
"""A parameter that works similar to :data:`click.FLOAT` but restricts the
value to fit into a range. Fails if the value doesn't fit into the range.
"""
name = "percentage"
def __init__(self, min=None, max=None):
self.min = min
self.max = max
def convert(self, value, param, ctx):
"""Convert value"""
rv = click.types.FloatParamType.convert(self, value, param, ctx)
if (
self.min is not None
and rv < self.min
or self.max is not None
and rv > self.max
):
if self.min is None:
self.fail(
"%s is bigger than the maximum valid value " "%s." % (rv, self.max),
param,
ctx,
)
elif self.max is None:
self.fail(
"%s is smaller than the minimum valid value "
"%s." % (rv, self.min),
param,
ctx,
)
else:
self.fail(
"%s is not in the valid range of %s to %s."
% (rv, self.min, self.max),
param,
ctx,
)
return rv
def | (self):
return "FloatRange(%r, %r)" % (self.min, self.max)
def slugify(value, allow_unicode=False):
"""Copied from Django Source code, only modifying last line (no need for safe
strings).
source: https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
try: # PY2
value = unicode(value)
except NameError: # PY3
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
value = re.sub(r"[^\w\s-]", "", value).strip().lower()
return re.sub(r"[-\s]+", "-", value)
def utf8_everywhere(mapping):
"""Recursively transforms all string found in the dict mapping to UTF-8 if we are
on Python 2"""
mutate_dict_in_place(
(
lambda value: value.decode("utf-8")
if isinstance(value, str)
and sys.version_info.major == 2
and sys.version_info.minor == 7
else value
),
mapping,
)
def sanitize(value):
"""Sanitize string to be used as a name of a directory.
>>> sanitize('productName')
'productName'
>>> sanitize('name with multiple spaces')
'name_with_multiple_spaces'
>>> sanitize('âtre fête île alcôve bûche çà génèse où Noël ovoïde capharnaüm')
'atre_fete_ile_alcove_buche_ca_genese_ou_Noel_ovoide_capharnaum'
>>> sanitize('replace,ponctuation:;signs!?byunderscorekeeping-hyphen.dot_and_underscore') # noqa
'replace_ponctuation_signs_byunderscorekeeping-hyphen.dot_and_underscore'
"""
# remove accents
rv = unidecode(value)
# replace punctuation signs and spaces by underscore
# keep hyphen, dot and underscore from punctuation
tobereplaced = re.sub(r"[-_.]", "", string.punctuation)
# add spaces to be removed
tobereplaced += r"\s"
rv = re.sub(r"[" + tobereplaced + r"]+", "_", rv)
return str(rv)
def mutate_dict_in_place(func, mapping):
"""Apply func to values of mapping.
The mapping object's values are modified in-place. The function is recursive,
    allowing it to also modify values of nested dicts that may be level-1 values of
mapping.
:param func: A function to apply to each value of mapping which is not a dict object
:type func: func
:param mapping: A Python dict object
:type mapping: dict
:returns: None
"""
for key, value in mapping.items():
if isinstance(value, dict):
mutate_dict_in_place(func, value)
else:
mapping[key] = func(value)
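# Example (added): mutate_dict_in_place(str.upper, {"a": "x", "b": {"c": "y"}})
# mutates the dict into {"a": "X", "b": {"c": "Y"}} and returns None.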
def merge_mappings(mapping1, mapping2):
"""Merge two mappings with string keys, values from `mapping2` overriding values
from `mapping1`.
    Does its best to detect the key in `mapping1` to override. For example, let's say
we have::
mapping2 = {"keya": "new"}
mapping1 = {"keyA": "obsolete"}
Then::
merge_mappings(mapping1, mapping2) ==> {"keyA": "new"}
If mapping2 has a key that cannot be detected in mapping1, this new key is added
to mapping1 as is.
:param dict mapping1: The mapping containing values to be overridden
:param dict mapping2: The mapping containing values that will override the
first mapping
"""
# A mapping between mapping1 keys as lowercase strings and original mapping1 keys
m1_keys_lowercase = {key.lower(): key for key in mapping1}
for key, value in mapping2.items():
if isinstance(value, dict):
try:
merge_mappings(mapping1[key], value)
except KeyError:
                # If the key from mapping2 is not in mapping1, it is either
                # because key is the lowercased form of the corresponding key in
                # mapping1 or because key is a new key to be added to mapping1
current_value = mapping1.setdefault(m1_keys_lowercase.get(key, key), {})
if not current_value:
current_value.update(value)
else:
merge_mappings(current_value, value)
else:
# Even for "scalar" values (a.k.a not nested structures), first check if
# the key from mapping1 is not the lowercase version of a key in mapping2.
# Otherwise, create the key in mapping1. This is the meaning of
# m1_keys_lowercase.get(key, key)
current_value = mapping1.get(m1_keys_lowercase.get(key, key), None)
if current_value is not None:
current_value_type = type(current_value)
if isinstance(value, six.string_types):
# Bool is a type with special meaning in Python, thus the special
# case
if current_value_type is bool:
if value.capitalize() not in ("True", "False"):
raise ValueError(
"Only true or false strings (case insensitive) are "
"allowed for booleans"
)
                        # Get the real Python value of the boolean, e.g. value='tRuE'
                        # capitalizes to 'True'. Comparing against 'True' gives the
                        # boolean without calling eval() on a user-supplied string
                        mapping1[m1_keys_lowercase[key]] = value.capitalize() == "True"
else:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
else:
try:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
except TypeError:
# Ignore any override value that does not have the same type
# as the default value
pass
else:
mapping1[key] = value
def maybe_generator(obj):
"""Generator function that get an arbitrary object and generate values from it if
the object is a generator."""
if isinstance(obj, types.GeneratorType):
for elt in obj:
yield elt
else:
yield obj
DEFAULT_PROJ = CRS.from_epsg(4326)
def get_timestamp(date_time, date_format="%Y-%m-%dT%H:%M:%S"):
"""Returns the given date_time string formatted with date_format as timestamp,
in a PY2/3 compatible way
:param date_time: the datetime string to return as timestamp
:type date_time: str or unicode
:param date_format: (optional) the date format in which date_time is given,
defaults to '%Y-%m-%dT%H:%M:%S'
:type date_format: str or unicode
:returns: the timestamp corresponding to the date_time string in seconds
:rtype: float
"""
date_time = datetime.strptime(date_time, date_format)
try:
return date_time.timestamp()
# There is no timestamp method on datetime objects in Python 2
except AttributeError:
import time
return time.mktime(date_time.timetuple()) + date_time.microsecond / 1e6
class ProgressCallback(object):
"""A callable used to render progress to users for long running processes"""
def __init__(self, max_size=None):
self.pb = None
self.max_size = max_size
def __call__(self, current_size, max_size=None):
"""Update the progress bar.
:param current_size: amount of data already processed
:type current_size: int
:param max_size: maximum amount of data to be processed
:type max_size: int
"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
class NotebookProgressCallback(ProgressCallback):
"""A custom progress bar to be used inside Jupyter notebooks"""
def __call__(self, current_size, max_size=None):
"""Update the progress bar"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm_notebook(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
def repeatfunc(func, n, *args):
"""Call `func` `n` times with `args`"""
return starmap(func, repeat(args, n))
def makedirs(dirpath):
"""Create a directory in filesystem with parents if necessary"""
try:
os.makedirs(dirpath)
except OSError as err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(dirpath):
raise
def update_nested_dict(old_dict, new_dict, extend_list_values=False):
"""Update recursively old_dict items with new_dict ones
:param old_dict: dict to be updated
:type old_dict: dict
    :param new_dict: incoming dict
:type new_dict: dict
:param extend_list_values: extend old_dict value if both old/new values are lists
:type extend_list_values: bool
:returns: updated dict
:rtype: dict
"""
for k, v in new_dict.items():
if k in old_dict.keys():
if isinstance(v, dict) and isinstance(old_dict[k], dict):
old_dict[k] = update_nested_dict(
old_dict[k], v, extend_list_values=extend_list_values
)
elif (
extend_list_values
and isinstance(old_dict[k], list)
and isinstance(v, list)
):
old_dict[k].extend(v)
elif v:
old_dict[k] = v
else:
old_dict[k] = v
return old_dict
def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):
"""Recursive apply method to dict elements
:param config_dict: input nested dictionnary
:type config_dict: dict
:param apply_method: method to be applied to dict elements
:type apply_method: :func:`apply_method`
:param apply_method_parameters: optional parameters passed to the method
:type apply_method_parameters: dict
:returns: updated dict
:rtype: dict
"""
jsonpath_dict = copy.deepcopy(config_dict)
for dict_k, dict_v in jsonpath_dict.items():
if isinstance(dict_v, dict):
jsonpath_dict[dict_k] = dict_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
elif any(isinstance(dict_v, t) for t in (list, tuple)):
for list_idx, list_v in enumerate(dict_v):
if isinstance(list_v, dict):
jsonpath_dict[dict_k][list_idx] = dict_items_recursive_apply(
list_v, apply_method, **apply_method_parameters
)
else:
jsonpath_dict[dict_k][list_idx] = apply_method(
dict_k, list_v, **apply_method_parameters
)
else:
jsonpath_dict[dict_k] = apply_method(
dict_k, dict_v, **apply_method_parameters
)
return jsonpath_dict
| __repr__ | identifier_name |
__init__.py | # -*- coding: utf-8 -*-
# Copyright 2020, CS GROUP - France, http://www.c-s.fr
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utilities to be used throughout eodag.
Everything that does not fit into one of the specialised categories of utilities in
this package should go here
"""
from __future__ import unicode_literals
import copy
import errno
import os
import re
import string
import sys
import types
import unicodedata
from datetime import datetime
from itertools import repeat, starmap
import click
import six
from rasterio.crs import CRS
from requests.auth import AuthBase
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from unidecode import unidecode
# All modules using these should import them from utils package
try: # PY3
from urllib.parse import urljoin, urlparse, parse_qs, urlunparse # noqa
except ImportError: # PY2
from urlparse import urljoin, urlparse, parse_qs, urlunparse # noqa
try: # PY3
from urllib.parse import quote, quote_plus # noqa
if sys.version_info.minor < 5:
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query,
doseq=False,
safe="",
encoding=None,
errors=None,
quote_via=quote_plus,
): # noqa
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, str):
v = quote_via(v, safe, encoding, errors)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe, encoding, errors)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe, encoding, errors)
l.append(k + "=" + elt)
return "&".join(l)
else:
from urllib.parse import urlencode
except ImportError: # PY2
from urllib import quote, quote_plus # noqa
# Explicitly redefining urlencode the way it is defined in Python 3.5
def urlencode(
query, doseq=False, safe="", encoding=None, errors=None, quote_via=quote_plus
):
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError(
"not a valid non-string sequence " "or mapping object"
).with_traceback(tb)
l = [] # noqa
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + "=" + v)
elif isinstance(v, six.string_types):
v = quote_via(v, safe)
l.append(k + "=" + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v) # noqa
except TypeError:
# not a sequence
v = quote_via(str(v), safe)
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe)
l.append(k + "=" + elt)
return "&".join(l)
class RequestsTokenAuth(AuthBase):
"""A custom authentication class to be used with requests module"""
def __init__(self, token, where, qs_key=None):
self.token = token
self.where = where
self.qs_key = qs_key
def __call__(self, request):
"""Perform the actual authentication"""
if self.where == "qs":
parts = urlparse(request.url)
qs = parse_qs(parts.query)
qs[self.qs_key] = self.token
request.url = urlunparse(
(
parts.scheme,
parts.netloc,
parts.path,
parts.params,
urlencode(qs),
parts.fragment,
)
)
elif self.where == "header":
request.headers["Authorization"] = "Bearer {}".format(self.token)
return request
class FloatRange(click.types.FloatParamType):
"""A parameter that works similar to :data:`click.FLOAT` but restricts the
value to fit into a range. Fails if the value doesn't fit into the range.
"""
name = "percentage"
def __init__(self, min=None, max=None):
self.min = min
self.max = max
def convert(self, value, param, ctx):
"""Convert value"""
rv = click.types.FloatParamType.convert(self, value, param, ctx)
if (
self.min is not None
and rv < self.min
or self.max is not None
and rv > self.max
):
if self.min is None:
self.fail(
"%s is bigger than the maximum valid value " "%s." % (rv, self.max),
param,
ctx,
)
elif self.max is None:
self.fail(
"%s is smaller than the minimum valid value "
"%s." % (rv, self.min),
param,
ctx,
)
else:
self.fail(
"%s is not in the valid range of %s to %s."
% (rv, self.min, self.max),
param,
ctx,
)
return rv
def __repr__(self):
return "FloatRange(%r, %r)" % (self.min, self.max)
def slugify(value, allow_unicode=False):
"""Copied from Django Source code, only modifying last line (no need for safe
strings).
source: https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
try: # PY2
value = unicode(value)
except NameError: # PY3
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
value = re.sub(r"[^\w\s-]", "", value).strip().lower()
return re.sub(r"[-\s]+", "-", value)
def utf8_everywhere(mapping):
"""Recursively transforms all string found in the dict mapping to UTF-8 if we are
on Python 2"""
mutate_dict_in_place(
(
lambda value: value.decode("utf-8")
if isinstance(value, str)
and sys.version_info.major == 2
and sys.version_info.minor == 7
else value
),
mapping,
)
def sanitize(value):
"""Sanitize string to be used as a name of a directory.
>>> sanitize('productName')
'productName'
>>> sanitize('name with multiple spaces')
'name_with_multiple_spaces'
>>> sanitize('âtre fête île alcôve bûche çà génèse où Noël ovoïde capharnaüm')
'atre_fete_ile_alcove_buche_ca_genese_ou_Noel_ovoide_capharnaum'
>>> sanitize('replace,ponctuation:;signs!?byunderscorekeeping-hyphen.dot_and_underscore') # noqa
'replace_ponctuation_signs_byunderscorekeeping-hyphen.dot_and_underscore'
"""
# remove accents
rv = unidecode(value)
# replace punctuation signs and spaces by underscore
# keep hyphen, dot and underscore from punctuation
tobereplaced = re.sub(r"[-_.]", "", string.punctuation)
# add spaces to be removed
tobereplaced += r"\s"
rv = re.sub(r"[" + tobereplaced + r"]+", "_", rv)
return str(rv)
def mutate_dict_in_place(func, mapping):
"""Apply func to values of mapping.
The mapping object's values are modified in-place. The function is recursive,
    allowing it to also modify values of nested dicts that may be level-1 values of
mapping.
:param func: A function to apply to each value of mapping which is not a dict object
:type func: func
:param mapping: A Python dict object
:type mapping: dict
:returns: None
"""
for key, value in mapping.items():
if isinstance(value, dict):
mutate_dict_in_place(func, value)
else:
mapping[key] = func(value)
def merge_mappings(mapping1, mapping2):
"""Merge two mappings with string keys, values from `mapping2` overriding values
from `mapping1`.
    Does its best to detect the key in `mapping1` to override. For example, let's say
we have::
mapping2 = {"keya": "new"}
mapping1 = {"keyA": "obsolete"}
Then::
merge_mappings(mapping1, mapping2) ==> {"keyA": "new"}
If mapping2 has a key that cannot be detected in mapping1, this new key is added
to mapping1 as is.
:param dict mapping1: The mapping containing values to be overridden
:param dict mapping2: The mapping containing values that will override the
first mapping
"""
# A mapping between mapping1 keys as lowercase strings and original mapping1 keys
m1_keys_lowercase = {key.lower(): key for key in mapping1}
for key, value in mapping2.items():
if isinstance(value, dict):
try:
merge_mappings(mapping1[key], value)
except KeyError:
                # If the key from mapping2 is not in mapping1, it is either
                # because key is the lowercased form of the corresponding key in
                # mapping1 or because key is a new key to be added to mapping1
current_value = mapping1.setdefault(m1_keys_lowercase.get(key, key), {})
if not current_value:
current_value.update(value)
else:
merge_mappings(current_value, value)
else:
# Even for "scalar" values (a.k.a not nested structures), first check if
# the key from mapping1 is not the lowercase version of a key in mapping2.
# Otherwise, create the key in mapping1. This is the meaning of
# m1_keys_lowercase.get(key, key)
current_value = mapping1.get(m1_keys_lowercase.get(key, key), None)
if current_value is not None:
current_value_type = type(current_value)
if isinstance(value, six.string_types):
# Bool is a type with special meaning in Python, thus the special
# case
if current_value_type is bool:
if value.capitalize() not in ("True", "False"):
raise ValueError(
"Only true or false strings (case insensitive) are "
"allowed for booleans"
)
                        # Get the real Python value of the boolean, e.g. value='tRuE'
                        # capitalizes to 'True'. Comparing against 'True' gives the
                        # boolean without calling eval() on a user-supplied string
                        mapping1[m1_keys_lowercase[key]] = value.capitalize() == "True"
else:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
else:
try:
mapping1[m1_keys_lowercase[key]] = current_value_type(value)
except TypeError:
# Ignore any override value that does not have the same type
# as the default value
pass
else:
mapping1[key] = value
def maybe_generator(obj):
"""Generator function that get an arbitrary object and generate values from it if
the object is a generator."""
if isinstance(obj, types.GeneratorType):
for elt in obj:
yield elt
else:
yield obj
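# Example (added): maybe_generator lets callers iterate uniformly over plain
# objects and generators: list(maybe_generator(42)) == [42].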
DEFAULT_PROJ = CRS.from_epsg(4326)
def get_timestamp(date_time, date_format="%Y-%m-%dT%H:%M:%S"):
"""Returns the given date_time string formatted with date_format as timestamp,
in a PY2/3 compatible way
:param date_time: the datetime string to return as timestamp
:type date_time: str or unicode
:param date_format: (optional) the date format in which date_time is given,
defaults to '%Y-%m-%dT%H:%M:%S'
:type date_format: str or unicode
:returns: the timestamp corresponding to the date_time string in seconds
:rtype: float
"""
date_time = datetime.strptime(date_time, date_format)
try:
return date_time.timestamp()
# There is no timestamp method on datetime objects in Python 2
except AttributeError:
import time
return time.mktime(date_time.timetuple()) + date_time.microsecond / 1e6
class ProgressCallback(object):
"""A callable used to render progress to users for long running processes"""
def __init__(self, max_size=None):
self.pb = None
self.max_size = max_size
def __call__(self, current_size, max_size=None):
"""Update the progress bar.
:param current_size: amount of data already processed
:type current_size: int
:param max_size: maximum amount of data to be processed
:type max_size: int
"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
class NotebookProgressCallback(ProgressCallback):
"""A custom progress bar to be used inside Jupyter notebooks"""
def __call__(self, current_size, max_size=None):
"""Update the progress bar"""
if max_size is not None:
self.max_size = max_size
if self.pb is None:
self.pb = tqdm_notebook(total=self.max_size, unit="B", unit_scale=True)
self.pb.update(current_size)
def repeatfunc(func, n, *args):
"""Call `func` `n` times with `args`"""
return starmap(func, repeat(args, n))
def makedirs(dirpath):
"""Create a directory in filesystem with parents if necessary"""
try:
os.makedirs(dirpath)
except OSError as err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(dirpath):
raise
def update_nested_dict(old_dict, new_dict, extend_list_values=False):
"""Update recursively old_dict items with new_dict ones
:param old_dict: dict to be updated
:type old_dict: dict
    :param new_dict: incoming dict
:type new_dict: dict
:param extend_list_values: extend old_dict value if both old/new values are lists
:type extend_list_values: bool
:returns: updated dict
:rtype: dict
"""
for k, v in new_dict.items():
if k in old_dict.keys():
if isinstance(v, dict) and isinstance(old_dict[k], dict):
old_dict[k] = | elif (
extend_list_values
and isinstance(old_dict[k], list)
and isinstance(v, list)
):
old_dict[k].extend(v)
elif v:
old_dict[k] = v
else:
old_dict[k] = v
return old_dict
def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):
"""Recursive apply method to dict elements
:param config_dict: input nested dictionnary
:type config_dict: dict
:param apply_method: method to be applied to dict elements
:type apply_method: :func:`apply_method`
:param apply_method_parameters: optional parameters passed to the method
:type apply_method_parameters: dict
:returns: updated dict
:rtype: dict
"""
jsonpath_dict = copy.deepcopy(config_dict)
for dict_k, dict_v in jsonpath_dict.items():
if isinstance(dict_v, dict):
jsonpath_dict[dict_k] = dict_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
elif any(isinstance(dict_v, t) for t in (list, tuple)):
for list_idx, list_v in enumerate(dict_v):
if isinstance(list_v, dict):
jsonpath_dict[dict_k][list_idx] = dict_items_recursive_apply(
list_v, apply_method, **apply_method_parameters
)
else:
jsonpath_dict[dict_k][list_idx] = apply_method(
dict_k, list_v, **apply_method_parameters
)
else:
jsonpath_dict[dict_k] = apply_method(
dict_k, dict_v, **apply_method_parameters
)
return jsonpath_dict
| update_nested_dict(
old_dict[k], v, extend_list_values=extend_list_values
)
| conditional_block |
02.代码实现-06Tensorflow-01Cifar10-01基本网络.py | '''
Input -> conv layer 1 -> activation 1 -> pooling 1 -> conv layer 2 -> activation 2 -> pooling 2 -> nonlinear FC layer 1 -> nonlinear FC layer 2 -> FC layer 3 -> SoftMax -> Optimizer
Input data: 24 * 24 * 3 (cifar10 images are 32*32*3 and are preprocessed down to 24*24*3)
Conv layer 1: 5*5, K1 kernels, stride 1, output 24 * 24 * K1
Activation layer 1: ReLU
Pooling layer 1: 3*3, stride 2, output 12 * 12 * K1
Conv layer 2: 5*5, K2 kernels, stride 1, output 12 * 12 * K2
Activation layer 2: ReLU
Pooling layer 2: 3*3, stride 2, output 6 * 6 * K2
Nonlinear FC layer 1: 200 neurons (i.e. 200*6*6*K2 weights plus 200 biases), output 200
Nonlinear FC layer 2: 100 neurons (i.e. 100*200 weights plus 100 biases), output 100
Linear FC layer: 10 neurons
softmax layer
(Note: the code below actually uses fc1_units_num = 192 and fc2_units_num = 96 for the two nonlinear FC layers.)
'''
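# Shape walk-through (added note): with SAME padding the output side length is
# ceil(input / stride), so 24 -> conv(5x5, stride 1) -> 24 -> pool(3x3, stride 2)
# -> 12 -> conv -> 12 -> pool -> 6; the flattened feature vector therefore has
# 6 * 6 * conv2_kernel_num elements per image.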
import tensorflow as tf
import os
import cifar_input,cifar_toTFRecords
import numpy as np
import csv
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
learning_rate_init = 0.001
training_epochs = 1
batch_size = 100
display_step = 10
dataset_dir = '../Total_Data/TempData/'
num_examples_per_epoch_for_train = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # 50000
num_examples_per_epoch_for_eval = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
image_size = cifar_input.IMAGE_SIZE
image_channel = 3
n_classes = cifar_input.NUM_CLASSES_CIFAR10
conv1_kernel_num = 32
conv2_kernel_num = 32
fc1_units_num = 192
fc2_units_num = 96
def WeightsVariable(shape, name_str, stddev = 0.1):
initial = tf.truncated_normal(shape=shape, stddev=stddev, dtype=tf.float32)
return tf.Variable(initial_value=initial, dtype=tf.float32, name=name_str)
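# Note (added): tf.truncated_normal redraws samples that fall more than two
# standard deviations from the mean, a common TF1-style weight initialisation.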
def BiasesVariable(shape, name_str, init_value):
initial = tf.constant(init_value, shape=shape)
return tf.Variable(initial_value=initial, dtype=tf.float32, name = name_str)
# The convolution layers do no downsampling; all spatial reduction happens in the pooling layers
def Conv2d(x, W, b, stride=1, padding='SAME', activation=tf.nn.relu, act_name='relu'):
with tf.name_scope('conv2d_bias'):
y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
y = tf.nn.bias_add(y, b)
with tf.name_scope(act_name):
y = activation(y)
return y
def Pool2d(x, pool = tf.nn.max_pool, k =2, stride=2, padding='SAME'):
return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding=padding)
def FullyConnected(x, W, b, activate=tf.nn.relu, act_name='relu'):
with tf.name_scope('Wx_b'):
y = tf.matmul(x, W)
y = tf.add(y, b)
with tf.name_scope(act_name):
y = activate(y)
return y
def Inference(images_holder):
    with tf.name_scope('Conv2d_1'):  # conv layer 1
weights = WeightsVariable(shape=[5, 5, image_channel, conv1_kernel_num], name_str='weights', stddev=5e-2)
biases = BiasesVariable(shape=[conv1_kernel_num], name_str='biases', init_value=0.0)
conv1_out = Conv2d(images_holder, weights, biases, stride=1, padding='SAME')
    with tf.name_scope('Pool2d_1'):  # pooling layer 1
pool1_out = Pool2d(conv1_out, pool=tf.nn.max_pool, k=3, stride=2, padding='SAME')
    with tf.name_scope('Conv2d_2'):  # conv layer 2
weights = WeightsVariable(shape=[5, 5, conv1_kernel_num, conv2_kernel_num], name_str='weights', stddev=5e-2)
biases = BiasesVariable(shape=[conv2_kernel_num], name_str='biases', init_value=0.0)
conv2_out = Conv2d(pool1_out, weights, biases, stride=1, padding='SAME')
    with tf.name_scope('Pool2d_2'):  # pooling layer 2
        pool2_out = Pool2d(conv2_out, pool=tf.nn.max_pool, k=3, stride=2, padding='SAME')  # 6 * 6 * conv2_kernel_num
    with tf.name_scope('FeatsReshape'):  # flatten the 2-D feature maps (conv2_kernel_num maps of 6*6) into a 1-D feature vector
        features = tf.reshape(pool2_out, [batch_size, -1])  # [batch_size, 6*6*conv2_kernel_num]
feats_dim = features.get_shape()[1].value
    with tf.name_scope('FC1_nonlinear'):  # nonlinear fully connected layer 1
weights = WeightsVariable(shape=[feats_dim, fc1_units_num], name_str='weights', stddev=4e-2)
biases = BiasesVariable(shape=[fc1_units_num], name_str='biases', init_value=0.1)
fc1_out = FullyConnected(features, weights, biases,
activate=tf.nn.relu, act_name='relu')
    with tf.name_scope('FC2_nonlinear'):  # nonlinear fully connected layer 2
weights = WeightsVariable(shape=[fc1_units_num, fc2_units_num], name_str='weights', stddev=4e-2)
biases = BiasesVariable(shape=[fc2_units_num], name_str='biases', init_value=0.1)
fc2_out = FullyConnected(fc1_out, weights, biases,
activate=tf.nn.relu, act_name='relu')
    with tf.name_scope('FC2_linear'):  # linear fully connected (output) layer
weights = WeightsVariable(shape=[fc2_units_num, n_classes], name_str='weights', stddev=1.0 / fc2_units_num)
biases = BiasesVariable(shape=[n_classes], name_str='biases', init_value=0.0)
logits = FullyConnected(fc2_out, weights, biases,
activate=tf.identity, act_name='linear')
return logits
'''
The returned images have shape [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3].
The returned labels are not one-hot encoded: their shape is [batch_size], not [batch_size, n_classes].
'''
def get_distored_train_batch(data_dir, batch_size):
if not data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
# images, labels = cifar_input.distorted_inputs(cifar10or20or100=10, data_dir=data_dir, batch_size=batch_size)
images, labels = cifar_toTFRecords.readFromTFRecords(
'../Total_Data/TempData/cifar-10-batches-tfrecords/train_package.tfrecords', batch_size=batch_size,
img_shape=[32,32,3])
return images, labels
'''
Get the evaluation/test set batch
'''
def get_undistored_eval_batch(eval_data, data_dir, batch_size):
if not data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
# images, labels = cifar_input.inputs(cifar10or20or100=10, eval_data=eval_data, data_dir=data_dir, batch_size=batch_size)
images, labels = cifar_toTFRecords.readFromTFRecords(
'../Total_Data/TempData/cifar-10-batches-tfrecords/test_package.tfrecords', batch_size=batch_size,
img_shape=[32,32,3])
return images, labels
if __name__ == '__main__':
# cifar_input.maybe_download_and_extract('../Total_Data/TempData', cifar_input.CIFAR10_DATA_URL)
with tf.Graph().as_default():
        # inputs
with tf.name_scope('Inputs'):
images_holder = tf.placeholder(tf.float32, [batch_size, image_size, image_size, image_channel],
name='images')
            labels_holder = tf.placeholder(tf.int32, [batch_size], name='labels')  # integers 0 ~ 9
        # forward inference
with tf.name_scope('Inference'):
logits = Inference(images_holder)
        # define the loss layer
with tf.name_scope('Loss'):
            # cifar10 labels are not one-hot encoded, so the plain softmax cross-entropy op cannot be used; the sparse variant performs the one-hot encoding internally
labels = tf.cast( labels_holder, tf.int64 )
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy)
total_loss = cross_entropy_mean
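            # Equivalent formulation (added sketch): one-hot encoding the labels and
            # using the dense op computes the same loss, e.g.
            #   onehot_labels = tf.one_hot(labels, depth=n_classes)
            #   tf.nn.softmax_cross_entropy_with_logits_v2(labels=onehot_labels, logits=logits)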
        # define the optimization / training layer
with tf.name_scope('Train'):
learning_rate = tf.placeholder(tf.float32)
global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int64)
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
        # define the model evaluation layer
with tf.name_scope('Evaluate'):
top_K_op = tf.nn.in_top_k(predictions=logits, targets=labels_holder, k = 1)
with tf.name_scope('GetTrainBatch'):
images_train, labels_train = get_distored_train_batch(data_dir=dataset_dir, batch_size=batch_size)
with tf.name_scope('GetTestBatch'):
images_test, labels_test = get_undistored_eval_batch(eval_data=True, data_dir=dataset_dir,
batch_size=batch_size)
init_op = tf.global_variables_initializer()
# summary_writer = tf.summary.FileWriter(logdir='../logs', graph=tf.get_default_graph())
# summary_writer.close()
# cifar_input.maybe_download_and_extract('../Total_Data/TempData/', cifar_input.CIFAR10_DATA_URL)
results_list = list()
results_list.append(['learning_rate', learning_rate_init,
'training_epochs', training_epochs,
'batch_size', batch_size,
'display_step', display_step,
'conv1_kernel_num', conv1_kernel_num,
'conv2_kernel_num', conv2_kernel_num,
'fc1_units_num', fc1_units_num,
'fc2_units_num', fc2_units_num])
results_list.append(['train_step', 'train_loss', 'train_step', 'train_accuracy'])
with tf.Session() as sess:
sess.run(init_op)
            print('==>>>>>>>>>>== Start training the model on the training set ==<<<<<<<<<<==')
total_batches = int(num_examples_per_epoch_for_train / batch_size)
print('Per batch Size: ', batch_size)
print('Train sample Count Per Epoch: ', num_examples_per_epoch_for_train)
print('Total batch Count Per Epoch: ', total_batches)
tf.train.start_queue_runners()
training_step = 0
for epoch in range(training_epochs):
for batch_idx in range(total_batches):
images_batch, label_batch = sess.run([images_train, labels_train])
# print(label_batch)
_, loss_value = sess.run([train_op, total_loss], feed_dict={images_holder: images_batch,
labels_holder: label_batch,
learning_rate:learning_rate_init})
training_step = sess.run(global_step)
if training_step % display_step == 0:
predictions = sess.run([top_K_op], feed_dict={images_holder: images_batch,
labels_holder : label_batch})
batch_accuracy = np.sum(predictions) / batch_size
results_list.append([training_step, loss_value, training_step, batch_accuracy])
print("Training Step: " + str(training_step) +
", Training Loss= " + "{:.6f}".format(loss_value) +
", Training Accuracy= " + "{:.5f}".format(batch_accuracy))
            print('Training finished!')
            print('==>>>>>>>>>>== Start evaluating the model on the test set ==<<<<<<<<<<==')
total_batches = int(num_examples_per_epoch_for_eval / batch_size)
total_examples = total_batches * batch_size
print('Per batch Size: ', batch_size)
print('Test sample Count Per Epoch: ', total_examples)
print('Total batch Count Per Epoch: ', total_batches)
correct_predicted = 0
for test_step in range(total_batches):
images_batch, label_batch = sess.run([images_test, labels_test])
predictions = sess.run([top_K_op], feed_dict={images_holder: images_batch,
labels_holder: label_batch}) | accuracy_score = correct_predicted / total_examples
print('--------->Accuracy on Test Examples: ', accuracy_score)
results_list.append(['Accuracy on Test Examples: ', accuracy_score])
results_file = open('../logs/SummaryFiles/02.代码实现-06Tensorflow-02Cifar100-01读取数据集.csv', 'w', newline='')
csv_writer = csv.writer(results_file, dialect='excel')
for row in results_list:
csv_writer.writerow(row) | correct_predicted += np.sum(predictions) | random_line_split |
02.代码实现-06Tensorflow-01Cifar10-01基本网络.py | '''
Input -> conv layer 1 -> activation 1 -> pooling 1 -> conv layer 2 -> activation 2 -> pooling 2 -> nonlinear FC layer 1 -> nonlinear FC layer 2 -> FC layer 3 -> SoftMax -> Optimizer
Input data: 24 * 24 * 3 (cifar10 images are 32*32*3 and are preprocessed down to 24*24*3)
Conv layer 1: 5*5, K1 kernels, stride 1, output 24 * 24 * K1
Activation layer 1: ReLU
Pooling layer 1: 3*3, stride 2, output 12 * 12 * K1
Conv layer 2: 5*5, K2 kernels, stride 1, output 12 * 12 * K2
Activation layer 2: ReLU
Pooling layer 2: 3*3, stride 2, output 6 * 6 * K2
Nonlinear FC layer 1: 200 neurons (i.e. 200*6*6*K2 weights plus 200 biases), output 200
Nonlinear FC layer 2: 100 neurons (i.e. 100*200 weights plus 100 biases), output 100
Linear FC layer: 10 neurons
softmax layer
(Note: the code below actually uses fc1_units_num = 192 and fc2_units_num = 96 for the two nonlinear FC layers.)
'''
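# Parameter-count check (added note): with K2 = conv2_kernel_num = 32 the first
# FC layer allocates feats_dim * fc1_units_num = (6*6*32) * 192 weights, which
# matches WeightsVariable([feats_dim, fc1_units_num]) inside Inference().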
import tensorflow as tf
import os
import cifar_input,cifar_toTFRecords
import numpy as np
import csv
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
learning_rate_init = 0.001
training_epochs = 1
batch_size = 100
display_step = 10
dataset_dir = '../Total_Data/TempData/'
num_examples_per_epoch_for_train = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # 50000
num_examples_per_epoch_for_eval = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
image_size = cifar_input.IMAGE_SIZE
image_channel = 3
n_classes = cifar_input.NUM_CLASSES_CIFAR10
conv1_kernel_num = 32
conv2_kernel_num = 32
fc1_units_num = 192
fc2_units_num = 96
def WeightsVariable(shape, name_str, stddev = 0.1):
initial = tf.truncated_normal(shape=shape, stddev=stddev, dtype=tf.float32)
return tf.Variable(initial_value=initial, dtype=tf.float32, name=name_str)
def BiasesVariable(shape, name_str, init_value):
initial = tf.constant(init_value, shape=shape)
return tf.Variable(initial_value=initial, dtype=tf.float32, name = name_str)
# The convolution layers do no downsampling; all spatial reduction happens in the pooling layers
def Conv2d(x, W, b, stride=1, padding='SAME', activation=tf.nn.relu, act_name='relu'):
with tf.name_scope('conv2d_bias'):
y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
y = tf.nn.bias_add(y, b)
with tf.name_scope(act_name):
y = activation(y)
return y
def Pool2d(x, pool = tf.nn.max_pool, k =2, stride=2, padding='SAME'):
return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding=padding)
def FullyConnected(x, W, b, activate=tf.nn.relu, act_name='relu'):
with tf.name_scope('Wx_b'):
y = tf.matmul(x, W)
y = tf.add(y, b)
with tf.name_scope(act_name):
y = activate(y)
return y
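# Note (added): Conv2d and FullyConnected fuse the affine op, the bias add and
# the activation; passing activate=tf.identity (as the linear output layer of
# Inference() does) disables the nonlinearity.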
def Inference(images_holder):
    with tf.name_scope('Conv2d_1'):  # conv layer 1
weights = WeightsVariable(shape=[5, 5, image_channel, conv1_kernel_num], name_str='weights', stddev=5e-2)
biases = BiasesVariable(shape=[conv1_kernel_num], name_str='biases', init_value=0.0)
conv1_out = Conv2d(images_holder, weights, biases, stride=1, padding='SAME')
    with tf.name_scope('Pool2d_1'):  # pooling layer 1
pool1_out = Pool2d(conv1 | batches-tfrecords/train_package.tfrecords', batch_size=batch_size,
img_shape=[32,32,3])
return images, labels
'''
Get the evaluation/test set batch
'''
def get_undistored_eval_batch(eval_data, data_dir, batch_size):
if not data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
# images, labels = cifar_input.inputs(cifar10or20or100=10, eval_data=eval_data, data_dir=data_dir, batch_size=batch_size)
images, labels = cifar_toTFRecords.readFromTFRecords(
'../Total_Data/TempData/cifar-10-batches-tfrecords/test_package.tfrecords', batch_size=batch_size,
img_shape=[32,32,3])
return images, labels
if __name__ == '__main__':
# cifar_input.maybe_download_and_extract('../Total_Data/TempData', cifar_input.CIFAR10_DATA_URL)
with tf.Graph().as_default():
        # inputs
with tf.name_scope('Inputs'):
images_holder = tf.placeholder(tf.float32, [batch_size, image_size, image_size, image_channel],
name='images')
            labels_holder = tf.placeholder(tf.int32, [batch_size], name='labels')  # integers 0 ~ 9
        # forward inference
with tf.name_scope('Inference'):
logits = Inference(images_holder)
        # define the loss layer
with tf.name_scope('Loss'):
            # cifar10 labels are not one-hot encoded, so the plain softmax cross-entropy op cannot be used; the sparse variant performs the one-hot encoding internally
labels = tf.cast( labels_holder, tf.int64 )
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy)
total_loss = cross_entropy_mean
        # define the optimization / training layer
with tf.name_scope('Train'):
learning_rate = tf.placeholder(tf.float32)
global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int64)
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
        # define the model evaluation layer
with tf.name_scope('Evaluate'):
top_K_op = tf.nn.in_top_k(predictions=logits, targets=labels_holder, k = 1)
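            # Note (added): in_top_k yields a boolean vector of length batch_size that
            # is True where the true label is among the k largest logits; with k=1 this
            # is ordinary argmax accuracy, accumulated later via np.sum(predictions).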
with tf.name_scope('GetTrainBatch'):
images_train, labels_train = get_distored_train_batch(data_dir=dataset_dir, batch_size=batch_size)
with tf.name_scope('GetTestBatch'):
images_test, labels_test = get_undistored_eval_batch(eval_data=True, data_dir=dataset_dir,
batch_size=batch_size)
init_op = tf.global_variables_initializer()
        # summary_writer = tf.summary.FileWriter(logdir='../logs', graph=tf.get_default_graph())
        # summary_writer.close()
        # cifar_input.maybe_download_and_extract('../Total_Data/TempData/', cifar_input.CIFAR10_DATA_URL)
        results_list = list()
        results_list.append(['learning_rate', learning_rate_init,
                             'training_epochs', training_epochs,
                             'batch_size', batch_size,
                             'display_step', display_step,
                             'conv1_kernel_num', conv1_kernel_num,
                             'conv2_kernel_num', conv2_kernel_num,
                             'fc1_units_num', fc1_units_num,
                             'fc2_units_num', fc2_units_num])
        results_list.append(['train_step', 'train_loss', 'train_step', 'train_accuracy'])
        with tf.Session() as sess:
            sess.run(init_op)
            print('==>>>>>>>>>>== Start training the model on the training set ==<<<<<<<<<<==')
            total_batches = int(num_examples_per_epoch_for_train / batch_size)
            print('Per batch Size: ', batch_size)
            print('Train sample Count Per Epoch: ', num_examples_per_epoch_for_train)
            print('Total batch Count Per Epoch: ', total_batches)
            tf.train.start_queue_runners()
            training_step = 0
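            # Note (sketch, beyond the original script): start_queue_runners launches the threads
            # that fill the input queues behind readFromTFRecords; without it the sess.run calls
            # on images_train/labels_train below would block forever. A more robust TF1 pattern
            # also uses a coordinator for clean shutdown:
            #   coord = tf.train.Coordinator()
            #   threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            #   ...
            #   coord.request_stop(); coord.join(threads)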
            for epoch in range(training_epochs):
                for batch_idx in range(total_batches):
                    images_batch, label_batch = sess.run([images_train, labels_train])
                    # print(label_batch)
                    _, loss_value = sess.run([train_op, total_loss],
                                             feed_dict={images_holder: images_batch,
                                                        labels_holder: label_batch,
                                                        learning_rate: learning_rate_init})
                    training_step = sess.run(global_step)
                    if training_step % display_step == 0:
                        predictions = sess.run([top_K_op],
                                               feed_dict={images_holder: images_batch,
                                                          labels_holder: label_batch})
                        batch_accuracy = np.sum(predictions) / batch_size
                        results_list.append([training_step, loss_value, training_step, batch_accuracy])
                        print("Training Step: " + str(training_step) +
                              ", Training Loss= " + "{:.6f}".format(loss_value) +
                              ", Training Accuracy= " + "{:.5f}".format(batch_accuracy))
            print('Training finished!')
            print('==>>>>>>>>>>== Start evaluating the model on the test set ==<<<<<<<<<<==')
            total_batches = int(num_examples_per_epoch_for_eval / batch_size)
            total_examples = total_batches * batch_size
            print('Per batch Size: ', batch_size)
            print('Test sample Count Per Epoch: ', total_examples)
            print('Total batch Count Per Epoch: ', total_batches)
            correct_predicted = 0
            for test_step in range(total_batches):
                images_batch, label_batch = sess.run([images_test, labels_test])
                predictions = sess.run([top_K_op],
                                       feed_dict={images_holder: images_batch,
                                                  labels_holder: label_batch})
                correct_predicted += np.sum(predictions)
            accuracy_score = correct_predicted / total_examples
            print('--------->Accuracy on Test Examples: ', accuracy_score)
            results_list.append(['Accuracy on Test Examples: ', accuracy_score])
            results_file = open('../logs/SummaryFiles/02.代码实现-06Tensorflow-02Cifar100-01读取数据集.csv', 'w', newline='')
            csv_writer = csv.writer(results_file, dialect='excel')
            for row in results_list:
                csv_writer.writerow(row)
            results_file.close()  # close the file so the results are flushed to disk
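            # More idiomatic alternative (sketch; results_path is a hypothetical name for the
            # literal path above): a context manager closes the file even if a write raises:
            #   with open(results_path, 'w', newline='') as f:
            #       csv.writer(f, dialect='excel').writerows(results_list)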
identifier_body