file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
serde_snapshot.rs | use {
crate::{
accounts::Accounts,
accounts_db::{AccountStorageEntry, AccountsDB, AppendVecId, BankHashInfo},
accounts_index::Ancestors,
append_vec::AppendVec,
bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins},
blockhash_queue::BlockhashQueue,
epoch_stakes::EpochStakes,
message_processor::MessageProcessor,
rent_collector::RentCollector,
stakes::Stakes,
},
bincode,
bincode::{config::Options, Error},
fs_extra::dir::CopyOptions,
log::{info, warn},
rand::{thread_rng, Rng},
serde::{de::DeserializeOwned, Deserialize, Serialize},
solana_sdk::{
clock::{Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
genesis_config::ClusterType,
genesis_config::GenesisConfig,
hard_forks::HardForks,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
},
std::{
collections::{HashMap, HashSet},
io::{BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
result::Result,
sync::{atomic::Ordering, Arc, RwLock},
time::Instant,
},
};
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::IgnoreAsHelper;
mod common;
mod future;
mod tests;
mod utils;
use future::Context as TypeContextFuture;
#[allow(unused_imports)]
use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple};
// a number of test cases in accounts_db use this
#[cfg(test)]
pub(crate) use self::tests::reconstruct_accounts_db_via_serialization;
pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
NEWER,
}
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
struct AccountsDbFields<T>(HashMap<Slot, Vec<T>>, u64, Slot, BankHashInfo);
trait TypeContext<'a> {
type SerializableAccountStorageEntry: Serialize
+ DeserializeOwned
+ From<&'a AccountStorageEntry>
+ Into<AccountStorageEntry>;
fn serialize_bank_and_storage<S: serde::ser::Serializer>(
serializer: S,
serializable_bank: &SerializableBankAndStorage<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
serializable_db: &SerializableAccountsDB<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn deserialize_bank_fields<R>(
stream: &mut BufReader<R>,
) -> Result<
(
BankFieldsToDeserialize,
AccountsDbFields<Self::SerializableAccountStorageEntry>,
),
Error,
>
where
R: Read;
fn deserialize_accounts_db_fields<R>(
stream: &mut BufReader<R>,
) -> Result<AccountsDbFields<Self::SerializableAccountStorageEntry>, Error>
where
R: Read;
}
fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
where
R: Read,
T: DeserializeOwned,
{
bincode::options()
.with_limit(MAX_STREAM_SIZE)
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from::<R, T>(reader)
}
pub(crate) fn bank_from_stream<R, P>(
serde_style: SerdeStyle,
stream: &mut BufReader<R>,
append_vecs_path: P,
account_paths: &[PathBuf],
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> std::result::Result<Bank, Error>
where
R: Read,
P: AsRef<Path>,
{
macro_rules! INTO {
($x:ident) => {{
let (bank_fields, accounts_db_fields) = $x::deserialize_bank_fields(stream)?;
let bank = reconstruct_bank_from_fields(
bank_fields,
accounts_db_fields,
genesis_config,
frozen_account_pubkeys,
account_paths,
append_vecs_path,
debug_keys,
additional_builtins,
)?;
Ok(bank)
}};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_from_stream error: {:?}", err);
err
})
}
pub(crate) fn bank_to_stream<W>(
serde_style: SerdeStyle,
stream: &mut BufWriter<W>,
bank: &Bank,
snapshot_storages: &[SnapshotStorage],
) -> Result<(), Error>
where
W: Write,
{
macro_rules! INTO {
($x:ident) => {
bincode::serialize_into(
stream,
&SerializableBankAndStorage::<$x> {
bank,
snapshot_storages,
phantom: std::marker::PhantomData::default(),
},
)
};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_to_stream error: {:?}", err);
err
})
}
struct SerializableBankAndStorage<'a, C> {
bank: &'a Bank,
snapshot_storages: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_bank_and_storage(serializer, self)
}
}
struct SerializableAccountsDB<'a, C> {
accounts_db: &'a AccountsDB,
slot: Slot,
account_storage_entries: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_accounts_db_fields(serializer, self)
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}
fn reconstruct_bank_from_fields<E, P>(
bank_fields: BankFieldsToDeserialize,
accounts_db_fields: AccountsDbFields<E>, | frozen_account_pubkeys: &[Pubkey],
account_paths: &[PathBuf],
append_vecs_path: P,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> Result<Bank, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = reconstruct_accountsdb_from_fields(
accounts_db_fields,
account_paths,
append_vecs_path,
&genesis_config.cluster_type,
)?;
accounts_db.freeze_accounts(&bank_fields.ancestors, frozen_account_pubkeys);
let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot);
let bank = Bank::new_from_fields(
bank_rc,
genesis_config,
bank_fields,
debug_keys,
additional_builtins,
);
Ok(bank)
}
fn reconstruct_accountsdb_from_fields<E, P>(
accounts_db_fields: AccountsDbFields<E>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
cluster_type: &ClusterType,
) -> Result<AccountsDB, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = AccountsDB::new(account_paths.to_vec(), cluster_type);
let AccountsDbFields(storage, version, slot, bank_hash_info) = accounts_db_fields;
// convert to two level map of slot -> id -> account storage entry
let storage = {
let mut map = HashMap::new();
for (slot, entries) in storage.into_iter() {
let sub_map = map.entry(slot).or_insert_with(HashMap::new);
for entry in entries.into_iter() {
let entry: AccountStorageEntry = entry.into();
entry.slot.store(slot, Ordering::Relaxed);
sub_map.insert(entry.append_vec_id(), Arc::new(entry));
}
}
map
};
let mut last_log_update = Instant::now();
let mut remaining_slots_to_process = storage.len();
// Remap the deserialized AppendVec paths to point to correct local paths
let mut storage = storage
.into_iter()
.map(|(slot, mut slot_storage)| {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 10 {
info!("{} slots remaining...", remaining_slots_to_process);
last_log_update = now;
}
remaining_slots_to_process -= 1;
let mut new_slot_storage = HashMap::new();
for (id, storage_entry) in slot_storage.drain() {
let path_index = thread_rng().gen_range(0, accounts_db.paths.len());
let local_dir = &accounts_db.paths[path_index];
std::fs::create_dir_all(local_dir).expect("Create directory failed");
// Move the corresponding AppendVec from the snapshot into the directory pointed
// at by `local_dir`
let append_vec_relative_path =
AppendVec::new_relative_path(slot, storage_entry.append_vec_id());
let append_vec_abs_path = stream_append_vecs_path
.as_ref()
.join(&append_vec_relative_path);
let target = local_dir.join(append_vec_abs_path.file_name().unwrap());
std::fs::rename(append_vec_abs_path.clone(), target).or_else(|_| {
let mut copy_options = CopyOptions::new();
copy_options.overwrite = true;
fs_extra::move_items(&vec![&append_vec_abs_path], &local_dir, ©_options)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
.and(Ok(()))
})?;
// Notify the AppendVec of the new file location
let local_path = local_dir.join(append_vec_relative_path);
let mut u_storage_entry = Arc::try_unwrap(storage_entry).unwrap();
u_storage_entry.set_file(local_path)?;
new_slot_storage.insert(id, Arc::new(u_storage_entry));
}
Ok((slot, new_slot_storage))
})
.collect::<Result<HashMap<Slot, _>, Error>>()?;
// discard any slots with no storage entries
// this can happen if a non-root slot was serialized
// but non-root stores should not be included in the snapshot
storage.retain(|_slot, stores| !stores.is_empty());
accounts_db
.bank_hashes
.write()
.unwrap()
.insert(slot, bank_hash_info);
// Process deserialized data, set necessary fields in self
let max_id: usize = *storage
.values()
.flat_map(HashMap::keys)
.max()
.expect("At least one storage entry must exist from deserializing stream");
{
accounts_db.storage.0.extend(
storage.into_iter().map(|(slot, slot_storage_entry)| {
(slot, Arc::new(RwLock::new(slot_storage_entry)))
}),
);
}
accounts_db.next_id.store(max_id + 1, Ordering::Relaxed);
accounts_db
.write_version
.fetch_add(version, Ordering::Relaxed);
accounts_db.generate_index();
Ok(accounts_db)
} | genesis_config: &GenesisConfig, | random_line_split |
serde_snapshot.rs | use {
crate::{
accounts::Accounts,
accounts_db::{AccountStorageEntry, AccountsDB, AppendVecId, BankHashInfo},
accounts_index::Ancestors,
append_vec::AppendVec,
bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins},
blockhash_queue::BlockhashQueue,
epoch_stakes::EpochStakes,
message_processor::MessageProcessor,
rent_collector::RentCollector,
stakes::Stakes,
},
bincode,
bincode::{config::Options, Error},
fs_extra::dir::CopyOptions,
log::{info, warn},
rand::{thread_rng, Rng},
serde::{de::DeserializeOwned, Deserialize, Serialize},
solana_sdk::{
clock::{Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
genesis_config::ClusterType,
genesis_config::GenesisConfig,
hard_forks::HardForks,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
},
std::{
collections::{HashMap, HashSet},
io::{BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
result::Result,
sync::{atomic::Ordering, Arc, RwLock},
time::Instant,
},
};
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::IgnoreAsHelper;
mod common;
mod future;
mod tests;
mod utils;
use future::Context as TypeContextFuture;
#[allow(unused_imports)]
use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple};
// a number of test cases in accounts_db use this
#[cfg(test)]
pub(crate) use self::tests::reconstruct_accounts_db_via_serialization;
pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
NEWER,
}
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
struct AccountsDbFields<T>(HashMap<Slot, Vec<T>>, u64, Slot, BankHashInfo);
trait TypeContext<'a> {
type SerializableAccountStorageEntry: Serialize
+ DeserializeOwned
+ From<&'a AccountStorageEntry>
+ Into<AccountStorageEntry>;
fn serialize_bank_and_storage<S: serde::ser::Serializer>(
serializer: S,
serializable_bank: &SerializableBankAndStorage<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
serializable_db: &SerializableAccountsDB<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn deserialize_bank_fields<R>(
stream: &mut BufReader<R>,
) -> Result<
(
BankFieldsToDeserialize,
AccountsDbFields<Self::SerializableAccountStorageEntry>,
),
Error,
>
where
R: Read;
fn deserialize_accounts_db_fields<R>(
stream: &mut BufReader<R>,
) -> Result<AccountsDbFields<Self::SerializableAccountStorageEntry>, Error>
where
R: Read;
}
fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
where
R: Read,
T: DeserializeOwned,
{
bincode::options()
.with_limit(MAX_STREAM_SIZE)
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from::<R, T>(reader)
}
pub(crate) fn bank_from_stream<R, P>(
serde_style: SerdeStyle,
stream: &mut BufReader<R>,
append_vecs_path: P,
account_paths: &[PathBuf],
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> std::result::Result<Bank, Error>
where
R: Read,
P: AsRef<Path>,
{
macro_rules! INTO {
($x:ident) => {{
let (bank_fields, accounts_db_fields) = $x::deserialize_bank_fields(stream)?;
let bank = reconstruct_bank_from_fields(
bank_fields,
accounts_db_fields,
genesis_config,
frozen_account_pubkeys,
account_paths,
append_vecs_path,
debug_keys,
additional_builtins,
)?;
Ok(bank)
}};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_from_stream error: {:?}", err);
err
})
}
pub(crate) fn bank_to_stream<W>(
serde_style: SerdeStyle,
stream: &mut BufWriter<W>,
bank: &Bank,
snapshot_storages: &[SnapshotStorage],
) -> Result<(), Error>
where
W: Write,
{
macro_rules! INTO {
($x:ident) => {
bincode::serialize_into(
stream,
&SerializableBankAndStorage::<$x> {
bank,
snapshot_storages,
phantom: std::marker::PhantomData::default(),
},
)
};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_to_stream error: {:?}", err);
err
})
}
struct SerializableBankAndStorage<'a, C> {
bank: &'a Bank,
snapshot_storages: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
fn | <S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_bank_and_storage(serializer, self)
}
}
struct SerializableAccountsDB<'a, C> {
accounts_db: &'a AccountsDB,
slot: Slot,
account_storage_entries: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_accounts_db_fields(serializer, self)
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}
fn reconstruct_bank_from_fields<E, P>(
bank_fields: BankFieldsToDeserialize,
accounts_db_fields: AccountsDbFields<E>,
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
account_paths: &[PathBuf],
append_vecs_path: P,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> Result<Bank, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = reconstruct_accountsdb_from_fields(
accounts_db_fields,
account_paths,
append_vecs_path,
&genesis_config.cluster_type,
)?;
accounts_db.freeze_accounts(&bank_fields.ancestors, frozen_account_pubkeys);
let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot);
let bank = Bank::new_from_fields(
bank_rc,
genesis_config,
bank_fields,
debug_keys,
additional_builtins,
);
Ok(bank)
}
fn reconstruct_accountsdb_from_fields<E, P>(
accounts_db_fields: AccountsDbFields<E>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
cluster_type: &ClusterType,
) -> Result<AccountsDB, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = AccountsDB::new(account_paths.to_vec(), cluster_type);
let AccountsDbFields(storage, version, slot, bank_hash_info) = accounts_db_fields;
// convert to two level map of slot -> id -> account storage entry
let storage = {
let mut map = HashMap::new();
for (slot, entries) in storage.into_iter() {
let sub_map = map.entry(slot).or_insert_with(HashMap::new);
for entry in entries.into_iter() {
let entry: AccountStorageEntry = entry.into();
entry.slot.store(slot, Ordering::Relaxed);
sub_map.insert(entry.append_vec_id(), Arc::new(entry));
}
}
map
};
let mut last_log_update = Instant::now();
let mut remaining_slots_to_process = storage.len();
// Remap the deserialized AppendVec paths to point to correct local paths
let mut storage = storage
.into_iter()
.map(|(slot, mut slot_storage)| {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 10 {
info!("{} slots remaining...", remaining_slots_to_process);
last_log_update = now;
}
remaining_slots_to_process -= 1;
let mut new_slot_storage = HashMap::new();
for (id, storage_entry) in slot_storage.drain() {
let path_index = thread_rng().gen_range(0, accounts_db.paths.len());
let local_dir = &accounts_db.paths[path_index];
std::fs::create_dir_all(local_dir).expect("Create directory failed");
// Move the corresponding AppendVec from the snapshot into the directory pointed
// at by `local_dir`
let append_vec_relative_path =
AppendVec::new_relative_path(slot, storage_entry.append_vec_id());
let append_vec_abs_path = stream_append_vecs_path
.as_ref()
.join(&append_vec_relative_path);
let target = local_dir.join(append_vec_abs_path.file_name().unwrap());
std::fs::rename(append_vec_abs_path.clone(), target).or_else(|_| {
let mut copy_options = CopyOptions::new();
copy_options.overwrite = true;
fs_extra::move_items(&vec![&append_vec_abs_path], &local_dir, ©_options)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
.and(Ok(()))
})?;
// Notify the AppendVec of the new file location
let local_path = local_dir.join(append_vec_relative_path);
let mut u_storage_entry = Arc::try_unwrap(storage_entry).unwrap();
u_storage_entry.set_file(local_path)?;
new_slot_storage.insert(id, Arc::new(u_storage_entry));
}
Ok((slot, new_slot_storage))
})
.collect::<Result<HashMap<Slot, _>, Error>>()?;
// discard any slots with no storage entries
// this can happen if a non-root slot was serialized
// but non-root stores should not be included in the snapshot
storage.retain(|_slot, stores| !stores.is_empty());
accounts_db
.bank_hashes
.write()
.unwrap()
.insert(slot, bank_hash_info);
// Process deserialized data, set necessary fields in self
let max_id: usize = *storage
.values()
.flat_map(HashMap::keys)
.max()
.expect("At least one storage entry must exist from deserializing stream");
{
accounts_db.storage.0.extend(
storage.into_iter().map(|(slot, slot_storage_entry)| {
(slot, Arc::new(RwLock::new(slot_storage_entry)))
}),
);
}
accounts_db.next_id.store(max_id + 1, Ordering::Relaxed);
accounts_db
.write_version
.fetch_add(version, Ordering::Relaxed);
accounts_db.generate_index();
Ok(accounts_db)
}
| serialize | identifier_name |
serde_snapshot.rs | use {
crate::{
accounts::Accounts,
accounts_db::{AccountStorageEntry, AccountsDB, AppendVecId, BankHashInfo},
accounts_index::Ancestors,
append_vec::AppendVec,
bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins},
blockhash_queue::BlockhashQueue,
epoch_stakes::EpochStakes,
message_processor::MessageProcessor,
rent_collector::RentCollector,
stakes::Stakes,
},
bincode,
bincode::{config::Options, Error},
fs_extra::dir::CopyOptions,
log::{info, warn},
rand::{thread_rng, Rng},
serde::{de::DeserializeOwned, Deserialize, Serialize},
solana_sdk::{
clock::{Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
genesis_config::ClusterType,
genesis_config::GenesisConfig,
hard_forks::HardForks,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
},
std::{
collections::{HashMap, HashSet},
io::{BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
result::Result,
sync::{atomic::Ordering, Arc, RwLock},
time::Instant,
},
};
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::IgnoreAsHelper;
mod common;
mod future;
mod tests;
mod utils;
use future::Context as TypeContextFuture;
#[allow(unused_imports)]
use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple};
// a number of test cases in accounts_db use this
#[cfg(test)]
pub(crate) use self::tests::reconstruct_accounts_db_via_serialization;
pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
NEWER,
}
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
struct AccountsDbFields<T>(HashMap<Slot, Vec<T>>, u64, Slot, BankHashInfo);
trait TypeContext<'a> {
type SerializableAccountStorageEntry: Serialize
+ DeserializeOwned
+ From<&'a AccountStorageEntry>
+ Into<AccountStorageEntry>;
fn serialize_bank_and_storage<S: serde::ser::Serializer>(
serializer: S,
serializable_bank: &SerializableBankAndStorage<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
serializable_db: &SerializableAccountsDB<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn deserialize_bank_fields<R>(
stream: &mut BufReader<R>,
) -> Result<
(
BankFieldsToDeserialize,
AccountsDbFields<Self::SerializableAccountStorageEntry>,
),
Error,
>
where
R: Read;
fn deserialize_accounts_db_fields<R>(
stream: &mut BufReader<R>,
) -> Result<AccountsDbFields<Self::SerializableAccountStorageEntry>, Error>
where
R: Read;
}
fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
where
R: Read,
T: DeserializeOwned,
{
bincode::options()
.with_limit(MAX_STREAM_SIZE)
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from::<R, T>(reader)
}
pub(crate) fn bank_from_stream<R, P>(
serde_style: SerdeStyle,
stream: &mut BufReader<R>,
append_vecs_path: P,
account_paths: &[PathBuf],
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> std::result::Result<Bank, Error>
where
R: Read,
P: AsRef<Path>,
{
macro_rules! INTO {
($x:ident) => {{
let (bank_fields, accounts_db_fields) = $x::deserialize_bank_fields(stream)?;
let bank = reconstruct_bank_from_fields(
bank_fields,
accounts_db_fields,
genesis_config,
frozen_account_pubkeys,
account_paths,
append_vecs_path,
debug_keys,
additional_builtins,
)?;
Ok(bank)
}};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_from_stream error: {:?}", err);
err
})
}
pub(crate) fn bank_to_stream<W>(
serde_style: SerdeStyle,
stream: &mut BufWriter<W>,
bank: &Bank,
snapshot_storages: &[SnapshotStorage],
) -> Result<(), Error>
where
W: Write,
{
macro_rules! INTO {
($x:ident) => {
bincode::serialize_into(
stream,
&SerializableBankAndStorage::<$x> {
bank,
snapshot_storages,
phantom: std::marker::PhantomData::default(),
},
)
};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_to_stream error: {:?}", err);
err
})
}
struct SerializableBankAndStorage<'a, C> {
bank: &'a Bank,
snapshot_storages: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_bank_and_storage(serializer, self)
}
}
struct SerializableAccountsDB<'a, C> {
accounts_db: &'a AccountsDB,
slot: Slot,
account_storage_entries: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_accounts_db_fields(serializer, self)
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}
fn reconstruct_bank_from_fields<E, P>(
bank_fields: BankFieldsToDeserialize,
accounts_db_fields: AccountsDbFields<E>,
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
account_paths: &[PathBuf],
append_vecs_path: P,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> Result<Bank, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = reconstruct_accountsdb_from_fields(
accounts_db_fields,
account_paths,
append_vecs_path,
&genesis_config.cluster_type,
)?;
accounts_db.freeze_accounts(&bank_fields.ancestors, frozen_account_pubkeys);
let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot);
let bank = Bank::new_from_fields(
bank_rc,
genesis_config,
bank_fields,
debug_keys,
additional_builtins,
);
Ok(bank)
}
fn reconstruct_accountsdb_from_fields<E, P>(
accounts_db_fields: AccountsDbFields<E>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
cluster_type: &ClusterType,
) -> Result<AccountsDB, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
| {
let mut accounts_db = AccountsDB::new(account_paths.to_vec(), cluster_type);
let AccountsDbFields(storage, version, slot, bank_hash_info) = accounts_db_fields;
// convert to two level map of slot -> id -> account storage entry
let storage = {
let mut map = HashMap::new();
for (slot, entries) in storage.into_iter() {
let sub_map = map.entry(slot).or_insert_with(HashMap::new);
for entry in entries.into_iter() {
let entry: AccountStorageEntry = entry.into();
entry.slot.store(slot, Ordering::Relaxed);
sub_map.insert(entry.append_vec_id(), Arc::new(entry));
}
}
map
};
let mut last_log_update = Instant::now();
let mut remaining_slots_to_process = storage.len();
// Remap the deserialized AppendVec paths to point to correct local paths
let mut storage = storage
.into_iter()
.map(|(slot, mut slot_storage)| {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 10 {
info!("{} slots remaining...", remaining_slots_to_process);
last_log_update = now;
}
remaining_slots_to_process -= 1;
let mut new_slot_storage = HashMap::new();
for (id, storage_entry) in slot_storage.drain() {
let path_index = thread_rng().gen_range(0, accounts_db.paths.len());
let local_dir = &accounts_db.paths[path_index];
std::fs::create_dir_all(local_dir).expect("Create directory failed");
// Move the corresponding AppendVec from the snapshot into the directory pointed
// at by `local_dir`
let append_vec_relative_path =
AppendVec::new_relative_path(slot, storage_entry.append_vec_id());
let append_vec_abs_path = stream_append_vecs_path
.as_ref()
.join(&append_vec_relative_path);
let target = local_dir.join(append_vec_abs_path.file_name().unwrap());
std::fs::rename(append_vec_abs_path.clone(), target).or_else(|_| {
let mut copy_options = CopyOptions::new();
copy_options.overwrite = true;
fs_extra::move_items(&vec![&append_vec_abs_path], &local_dir, ©_options)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
.and(Ok(()))
})?;
// Notify the AppendVec of the new file location
let local_path = local_dir.join(append_vec_relative_path);
let mut u_storage_entry = Arc::try_unwrap(storage_entry).unwrap();
u_storage_entry.set_file(local_path)?;
new_slot_storage.insert(id, Arc::new(u_storage_entry));
}
Ok((slot, new_slot_storage))
})
.collect::<Result<HashMap<Slot, _>, Error>>()?;
// discard any slots with no storage entries
// this can happen if a non-root slot was serialized
// but non-root stores should not be included in the snapshot
storage.retain(|_slot, stores| !stores.is_empty());
accounts_db
.bank_hashes
.write()
.unwrap()
.insert(slot, bank_hash_info);
// Process deserialized data, set necessary fields in self
let max_id: usize = *storage
.values()
.flat_map(HashMap::keys)
.max()
.expect("At least one storage entry must exist from deserializing stream");
{
accounts_db.storage.0.extend(
storage.into_iter().map(|(slot, slot_storage_entry)| {
(slot, Arc::new(RwLock::new(slot_storage_entry)))
}),
);
}
accounts_db.next_id.store(max_id + 1, Ordering::Relaxed);
accounts_db
.write_version
.fetch_add(version, Ordering::Relaxed);
accounts_db.generate_index();
Ok(accounts_db)
} | identifier_body | |
control.go |
package control
import (
"context"
"fmt"
kitgrpc "github.com/go-kit/kit/transport/grpc"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"nRIC/api/v1/pb/db"
"nRIC/internal"
"nRIC/internal/msgx"
"nRIC/internal/msgx/endpoint"
"nRIC/internal/msgx/service"
"nRIC/internal/msgx/transport"
"nRIC/internal/xapp"
dbclient "nRIC/pkg/dbagent/grpcserver"
"net"
"os"
"sort"
"strconv"
"time"
)
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
func idstring(err error, entries ...fmt.Stringer) string {
var retval string = ""
var filler string = ""
for _, entry := range entries {
retval += filler + entry.String()
filler = " "
}
if err != nil {
retval += filler + "err(" + err.Error() + ")"
filler = " "
}
return retval
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
var e2tSubReqTimeout time.Duration
var e2tSubDelReqTime time.Duration
var e2tRecvMsgTimeout time.Duration
var e2tMaxSubReqTryCount uint64 // Initial try + retry
var e2tMaxSubDelReqTryCount uint64 // Initial try + retry
type Control struct {
//*xapp.MsgClientToXapp
MsgSendertoSMO *msgx.MsgSender //*msgx.MsgSender
CntRecvMsg uint64
AccessDbagent *dbclient.MsgSender
Endpoint map[uint32]*msgx.KafkaMsgSender //key: XappID ,value : kafka writer
}
type MsgMeid struct {
PlmnID string
EnbID string
RanName string
}
func init() {
}
func NewControl(MsgSendertoSMO *msgx.MsgSender ,AcessDbAgent *dbclient.MsgSender) *Control {
endpoint := make(map[uint32]*msgx.KafkaMsgSender)
c := &Control{
//MsgClientToXapp: MsgClientToXapp,
MsgSendertoSMO: MsgSendertoSMO,
//subscriber: subscriber,
AccessDbagent: AcessDbAgent,
Endpoint: endpoint,
}
return c
}
func (c *Control) ReadyCB(data interface{}) {
if c.MsgSendertoSMO == nil {
}
}
func (c *Control) CreateAndRunMsgServer (grpcAddr string) {
svc := service.NewMsgService(c)
ep := endpoint.NewMsgServiceEndpoint(svc)
s := transport.NewMsgServer(ep)
// The gRPC listener mounts the Go kit gRPC server we created.
grpcListener, err := net.Listen("tcp", grpcAddr)
if err != nil {
xapp.Logger.Info("transport", "gRPC", "during", "Listen", "err", err)
os.Exit(1)
}
xapp.Logger.Info("transport", "gRPC", "addr", grpcAddr)
// we add the Go Kit gRPC Interceptor to our gRPC service as it is used by
// the here demonstrated zipkin tracing middleware.
baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor))
msgx.RegisterMsgServiceServer(baseServer, s)
baseServer.Serve(grpcListener)
}
func (c *Control) Run(grpcAddr string) {
//xapp.SetReadyCB(c.ReadyCB, nil)
//xapp.Run(c,grpcAddr)
c.CreateAndRunMsgServer(grpcAddr)
}
func (c *Control) Consume(msg *xapp.MsgParams) (err error) {
xapp.Logger.Debug("Received message type: %s", xapp.RicMessageTypeToName[msg.Mtype])
if c.MsgSendertoSMO == nil {
err = fmt.Errorf("Msg object nil can handle %s", msg.String())
xapp.Logger.Error("%s", err.Error())
return
}
c.CntRecvMsg++
switch msg.Mtype {
case xapp.RIC_O1_REGISTER:
go c.handleXappRegisterRequest(msg)
case xapp.RIC_O1_INIT:
go c.handleSMOInit(msg)
case xapp.RIC_O1_ENABLE:
go c.handleSMOEnableOrDisable(msg)
case xapp.RIC_O1_DISABLE:
go c.handleSMOEnableOrDisable(msg)
default:
xapp.Logger.Info("Unknown Message Type '%d', discarding", msg.Mtype)
}
return
}
//分配xappID
//XappID 合法值 (1...65535),0 为 非法值
func (c *Control) allocXappID(RegMsg msgx.XappRegMsg) (uint32,error, bool){
isRegistered := false
resp,err := c.AccessDbagent.Client.MOITableReadAll(context.Background(),&db.MOITableReadAllRequest{Api: "1",})
if err != nil {
xapp.Logger.Error(err.Error())
return 0 ,err,isRegistered
}
//第一个注册的xapp,直接分配 xappID = 1
if len(resp.MoiTables) == 0 {
return 1,nil,isRegistered
}
for _,r := range resp.MoiTables{
//重复注册,走正常返回流程
if r.XappName == RegMsg.XappName {
isRegistered = true
return r.XappID,nil,isRegistered
}
}
//新注册 ,先按从小到大排序(只有1个表项也可以排序,不会返回错误),然后找到最小的可用xappID
ps := resp.MoiTables
sort.SliceStable(ps, func(i, j int) bool {
return ps[i].XappID < ps[j].XappID
})
var i uint32
//正常的分配为: ps[0].XappID = 1 ,ps[1].XappID = 2 ,ps[2].XappID = 3 ,
//如果出现ps[1].XappID = 3 ,说明原来的XappID = 2 表项已经释放,可以被再次分配使用
for i = 1; i <= 65535;i++ {
if i != ps[i-1].XappID {
// i 值未被使用,可以被分配
return i,nil,isRegistered
}
}
return 0 , fmt.Errorf("分配失败"),isRegistered
}
//新增该xApp的管理对象实例(xApp MOI)表项
func (c *Control) addXappMOI(XappID uint32,RegMsg msgx.XappRegMsg) error {
r := &db.MOITableInsertRequest{}
r.Api = "1"
m := &db.MOITable{}
m.XappID = XappID
m.XappName = RegMsg.XappName
m.XappVer = RegMsg.XappVer
m.Functions = RegMsg.XappFunctions
m.RunningStatus = "inactive"
m.IsReady = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册 | .Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
RicServices["nricdbagent"] = &RicDbagent
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_RESP,
MsgVer: 1,
XappRequestID: &msgx.XAPPRequestID{
XappID: XappID, //返回分配的XappID
XappInstanceID: RegMsg.Header.XappRequestID.XappInstanceID,
},
},
RicServices: RicServices,
Topic:Topic,
KafkaURL: internal.KafkaURL,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) SendRegisterFailureResp(Client2Xapp *msgx.MsgSender,Cause string,params *xapp.MsgParams) {
xapp.Logger.Info("Send RegisterFailureResp Msg to Xapp: %s\n",xapp.RicMessageTypeToName[params.Mtype])
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_FAILURE,
MsgVer: 1,
},
Cause: Cause,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) handleSMOInit(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOInitMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Init msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
//-------------------------------------------------------------------
// handle from SMO Enable or Disable Xapp Request
//------------------------------------------------------------------
func (c *Control) handleSMOEnableOrDisable(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOEnableMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Enable or Disable msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
|
return
}
xapp.Logger | identifier_name |
control.go | package control
import (
"context"
"fmt"
kitgrpc "github.com/go-kit/kit/transport/grpc"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"nRIC/api/v1/pb/db"
"nRIC/internal"
"nRIC/internal/msgx"
"nRIC/internal/msgx/endpoint"
"nRIC/internal/msgx/service"
"nRIC/internal/msgx/transport"
"nRIC/internal/xapp"
dbclient "nRIC/pkg/dbagent/grpcserver"
"net"
"os"
"sort"
"strconv"
"time"
)
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
func idstring(err error, entries ...fmt.Stringer) string {
var retval string = ""
var filler string = ""
for _, entry := range entries {
retval += filler + entry.String()
filler = " "
}
if err != nil {
retval += filler + "err(" + err.Error() + ")"
filler = " "
}
return retval
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
var e2tSubReqTimeout time.Duration
var e2tSubDelReqTime time.Duration
var e2tRecvMsgTimeout time.Duration
var e2tMaxSubReqTryCount uint64 // Initial try + retry
var e2tMaxSubDelReqTryCount uint64 // Initial try + retry
type Control struct {
//*xapp.MsgClientToXapp
MsgSendertoSMO *msgx.MsgSender //*msgx.MsgSender
CntRecvMsg uint64
AccessDbagent *dbclient.MsgSender
Endpoint map[uint32]*msgx.KafkaMsgSender //key: XappID ,value : kafka writer
}
type MsgMeid struct {
PlmnID string
EnbID string
RanName string
}
func init() {
}
func NewControl(MsgSendertoSMO *msgx.MsgSender ,AcessDbAgent *dbclient.MsgSender) *Control {
endpoint := make(map[uint32]*msgx.KafkaMsgSender)
c := &Control{
//MsgClientToXapp: MsgClientToXapp, | //subscriber: subscriber,
AccessDbagent: AcessDbAgent,
Endpoint: endpoint,
}
return c
}
func (c *Control) ReadyCB(data interface{}) {
if c.MsgSendertoSMO == nil {
}
}
func (c *Control) CreateAndRunMsgServer (grpcAddr string) {
svc := service.NewMsgService(c)
ep := endpoint.NewMsgServiceEndpoint(svc)
s := transport.NewMsgServer(ep)
// The gRPC listener mounts the Go kit gRPC server we created.
grpcListener, err := net.Listen("tcp", grpcAddr)
if err != nil {
xapp.Logger.Info("transport", "gRPC", "during", "Listen", "err", err)
os.Exit(1)
}
xapp.Logger.Info("transport", "gRPC", "addr", grpcAddr)
// we add the Go Kit gRPC Interceptor to our gRPC service as it is used by
// the here demonstrated zipkin tracing middleware.
baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor))
msgx.RegisterMsgServiceServer(baseServer, s)
baseServer.Serve(grpcListener)
}
func (c *Control) Run(grpcAddr string) {
//xapp.SetReadyCB(c.ReadyCB, nil)
//xapp.Run(c,grpcAddr)
c.CreateAndRunMsgServer(grpcAddr)
}
func (c *Control) Consume(msg *xapp.MsgParams) (err error) {
xapp.Logger.Debug("Received message type: %s", xapp.RicMessageTypeToName[msg.Mtype])
if c.MsgSendertoSMO == nil {
err = fmt.Errorf("Msg object nil can handle %s", msg.String())
xapp.Logger.Error("%s", err.Error())
return
}
c.CntRecvMsg++
switch msg.Mtype {
case xapp.RIC_O1_REGISTER:
go c.handleXappRegisterRequest(msg)
case xapp.RIC_O1_INIT:
go c.handleSMOInit(msg)
case xapp.RIC_O1_ENABLE:
go c.handleSMOEnableOrDisable(msg)
case xapp.RIC_O1_DISABLE:
go c.handleSMOEnableOrDisable(msg)
default:
xapp.Logger.Info("Unknown Message Type '%d', discarding", msg.Mtype)
}
return
}
//分配xappID
//XappID 合法值 (1...65535),0 为 非法值
func (c *Control) allocXappID(RegMsg msgx.XappRegMsg) (uint32,error, bool){
isRegistered := false
resp,err := c.AccessDbagent.Client.MOITableReadAll(context.Background(),&db.MOITableReadAllRequest{Api: "1",})
if err != nil {
xapp.Logger.Error(err.Error())
return 0 ,err,isRegistered
}
//第一个注册的xapp,直接分配 xappID = 1
if len(resp.MoiTables) == 0 {
return 1,nil,isRegistered
}
for _,r := range resp.MoiTables{
//重复注册,走正常返回流程
if r.XappName == RegMsg.XappName {
isRegistered = true
return r.XappID,nil,isRegistered
}
}
//新注册 ,先按从小到大排序(只有1个表项也可以排序,不会返回错误),然后找到最小的可用xappID
ps := resp.MoiTables
sort.SliceStable(ps, func(i, j int) bool {
return ps[i].XappID < ps[j].XappID
})
var i uint32
//正常的分配为: ps[0].XappID = 1 ,ps[1].XappID = 2 ,ps[2].XappID = 3 ,
//如果出现ps[1].XappID = 3 ,说明原来的XappID = 2 表项已经释放,可以被再次分配使用
for i = 1; i <= 65535;i++ {
if i != ps[i-1].XappID {
// i 值未被使用,可以被分配
return i,nil,isRegistered
}
}
return 0 , fmt.Errorf("分配失败"),isRegistered
}
//新增该xApp的管理对象实例(xApp MOI)表项
func (c *Control) addXappMOI(XappID uint32,RegMsg msgx.XappRegMsg) error {
r := &db.MOITableInsertRequest{}
r.Api = "1"
m := &db.MOITable{}
m.XappID = XappID
m.XappName = RegMsg.XappName
m.XappVer = RegMsg.XappVer
m.Functions = RegMsg.XappFunctions
m.RunningStatus = "inactive"
m.IsReady = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册
return
}
xapp.Logger.Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
RicServices["nricdbagent"] = &RicDbagent
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_RESP,
MsgVer: 1,
XappRequestID: &msgx.XAPPRequestID{
XappID: XappID, //返回分配的XappID
XappInstanceID: RegMsg.Header.XappRequestID.XappInstanceID,
},
},
RicServices: RicServices,
Topic:Topic,
KafkaURL: internal.KafkaURL,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) SendRegisterFailureResp(Client2Xapp *msgx.MsgSender,Cause string,params *xapp.MsgParams) {
xapp.Logger.Info("Send RegisterFailureResp Msg to Xapp: %s\n",xapp.RicMessageTypeToName[params.Mtype])
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_FAILURE,
MsgVer: 1,
},
Cause: Cause,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) handleSMOInit(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOInitMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Init msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
//-------------------------------------------------------------------
// handle from SMO Enable or Disable Xapp Request
//------------------------------------------------------------------
func (c *Control) handleSMOEnableOrDisable(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOEnableMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Enable or Disable msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
} | MsgSendertoSMO: MsgSendertoSMO, | random_line_split |
control.go |
package control
import (
"context"
"fmt"
kitgrpc "github.com/go-kit/kit/transport/grpc"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"nRIC/api/v1/pb/db"
"nRIC/internal"
"nRIC/internal/msgx"
"nRIC/internal/msgx/endpoint"
"nRIC/internal/msgx/service"
"nRIC/internal/msgx/transport"
"nRIC/internal/xapp"
dbclient "nRIC/pkg/dbagent/grpcserver"
"net"
"os"
"sort"
"strconv"
"time"
)
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
func idstring(err error, entries ...fmt.Stringer) string {
var retval string = ""
var filler string = ""
for _, entry := range entries {
retval += filler + entry.String()
filler = " "
}
if err != nil {
retval += filler + "err(" + err.Error() + ")"
filler = " "
}
return retval
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
var e2tSubReqTimeout time.Duration
var e2tSubDelReqTime time.Duration
var e2tRecvMsgTimeout time.Duration
var e2tMaxSubReqTryCount uint64 // Initial try + retry
var e2tMaxSubDelReqTryCount uint64 // Initial try + retry
type Control struct {
//*xapp.MsgClientToXapp
MsgSendertoSMO *msgx.MsgSender //*msgx.MsgSender
CntRecvMsg uint64
AccessDbagent *dbclient.MsgSender
Endpoint map[uint32]*msgx.KafkaMsgSender //key: XappID ,value : kafka writer
}
type MsgMeid struct {
PlmnID string
EnbID string
RanName string
}
func init() {
}
func NewControl(MsgSendertoSMO *msgx.MsgSender ,AcessDbAgent *dbclient.MsgSender) *Control {
endpoint := make(map[uint32]*msgx.KafkaMsgSender)
c := &Control{
//MsgClientToXapp: MsgClientToXapp,
MsgSendertoSMO: MsgSendertoSMO,
//subscriber: subscriber,
AccessDbagent: AcessDbAgent,
Endpoint: endpoint,
}
return c
}
func (c *Control) ReadyCB(data interface{}) {
if c.MsgSendertoSMO == nil {
}
}
func (c *Control) CreateAndRunMsgServer (grpcAddr string) {
svc := service.NewMsgService(c)
ep := endpoint.NewMsgServiceEndpoint(svc)
s := transport.NewMsgServer(ep)
// The gRPC listener mounts the Go kit gRPC server we created.
grpcListener, err := net.Listen("tcp", grpcAddr)
if err != nil {
xapp.Logger.Info("transport", "gRPC", "during", "Listen", "err", err)
os.Exit(1)
}
xapp.Logger.Info("transport", "gRPC", "addr", grpcAddr)
// we add the Go Kit gRPC Interceptor to our gRPC service as it is used by
// the here demonstrated zipkin tracing middleware.
baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor))
msgx.RegisterMsgServiceServer(baseServer, s)
baseServer.Serve(grpcListener)
}
func (c *Control) Run(grpcAddr string) {
//xapp.SetReadyCB(c.ReadyCB, nil)
//xapp.Run(c,grpcAddr)
c.CreateAndRunMsgServer(grpcAddr)
}
func (c *Control) Consume(msg *xapp.MsgParams) (err error) {
xapp.Logger.Debug("Received message type: %s", xapp.RicMessageTypeToName[msg.Mtype])
if c.MsgSendertoSMO == nil {
err = fmt.Errorf("Msg object nil can handle %s", msg.String())
xapp.Logger.Error("%s", err.Error())
return
}
c.CntRecvMsg++
switch msg.Mtype {
case xapp.RIC_O1_REGISTER:
go c.handleXappRegisterRequest(msg)
case xapp.RIC_O1_INIT:
go c.handleSMOInit(msg)
case xapp.RIC_O1_ENABLE:
go c.handleSMOEnableOrDisable(msg)
case xapp.RIC_O1_DISABLE:
go c.handleSMOEnableOrDisable(msg)
default:
xapp.Logger.Info("Unknown Message Type '%d', discarding", msg.Mtype)
}
return
}
//分配xappID
//XappID 合法值 (1...65535),0 为 非法值
func (c *Control) allocXappID(RegMsg msgx.XappRegMsg) (uint32,error, bool){
isRegistered := false
resp,err := c.AccessDbagent.Client.MOITableReadAll(context.Background(),&db.MOITableReadAllRequest{Api: "1",})
if err != nil {
xapp.Logger.Error(err.Error())
return 0 ,err,isRegistered
}
//第一个注册的xapp,直接分配 xappID = 1
if len(resp.MoiTables) == 0 {
return 1,nil,isRegistered
}
for _,r := range resp.MoiTables{
//重复注册,走正常返回流程
if r.XappName == RegMsg.XappName {
isRegistered = true
return r.XappID,nil,isRegistered
}
}
//新注册 ,先按从小到大排序(只有1个表项也可以排序,不会返回错误),然后找到最小的可用xappID
ps := resp.MoiTables
sort.SliceStable(ps, func(i, j int) bool {
return ps[i].XappID < ps[j].XappID
})
var i uint32
//正常的分配为: ps[0].XappID = 1 ,ps[1].XappID = 2 ,ps[2].XappID = 3 ,
//如果出现ps[1].XappID = 3 ,说明原来的XappID = 2 表项已经释放,可以被再次分配使用
for i = 1; i <= 65535;i++ {
if i != ps[i-1].XappID {
// i 值未被使用,可以被分配
return i,nil,isRegistered
}
}
return 0 , fmt.Errorf("分配失败"),isRegistered
}
//新增该xApp的管理对象实例(xApp MOI)表项
func (c *Control) addXappMOI(XappID uint32,RegMsg msgx.XappRegMsg) error {
r := &db.MOITableInsertRequest{}
r.Api = "1"
m := &db.MOITable{}
m.XappID = XappID
m.XappName = RegMsg.XappName
m.XappVer = RegMsg.XappVer
m.Functions = RegMsg.XappFunctions
m.RunningStatus = "inactive"
m.IsReady = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册
return
}
xapp.Logger.Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
RicServices["nricdbagent"] = &RicDbagent
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_RESP,
MsgVer: 1,
XappRequestID: &msgx.XAPPRequestID{
XappID: XappID, //返回分配的XappID
XappInstanceID: RegMsg.Header.XappRequestID.XappInstanceID,
},
},
RicServices: RicServices,
Topic:Topic,
KafkaURL: internal.KafkaURL,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) SendRegisterFailureResp(Client2Xapp *msgx.MsgSender,Cause string,params *xapp.MsgParams) {
xapp.Logger.Info("Send RegisterFailureResp Msg to Xapp: %s\n",xapp.RicMessageTypeToName[params.Mtype])
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_FAILURE,
MsgVer: 1,
},
Cause: Cause,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) handleSMOInit(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOInitMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Init msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
//-------------------------------------------------------------------
// handle from SMO Enable or Disable Xapp Request
//------------------------------------------------------------------
func (c *Control) handleSMOEnableOrDisable(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOEnableMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Enable or Disable msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
| conditional_block | ||
control.go |
package control
import (
"context"
"fmt"
kitgrpc "github.com/go-kit/kit/transport/grpc"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"nRIC/api/v1/pb/db"
"nRIC/internal"
"nRIC/internal/msgx"
"nRIC/internal/msgx/endpoint"
"nRIC/internal/msgx/service"
"nRIC/internal/msgx/transport"
"nRIC/internal/xapp"
dbclient "nRIC/pkg/dbagent/grpcserver"
"net"
"os"
"sort"
"strconv"
"time"
)
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
func idstring(err error, entries ...fmt.Stringer) string {
var retval string = ""
var filler string = ""
for _, entry := range entries {
retval += filler + entry.String()
filler = " "
}
if err != nil {
retval += filler + "err(" + err.Error() + ")"
filler = " "
}
return retval
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
var e2tSubReqTimeout time.Duration
var e2tSubDelReqTime time.Duration
var e2tRecvMsgTimeout time.Duration
var e2tMaxSubReqTryCount uint64 // Initial try + retry
var e2tMaxSubDelReqTryCount uint64 // Initial try + retry
type Control struct {
//*xapp.MsgClientToXapp
MsgSendertoSMO *msgx.MsgSender //*msgx.MsgSender
CntRecvMsg uint64
AccessDbagent *dbclient.MsgSender
Endpoint map[uint32]*msgx.KafkaMsgSender //key: XappID ,value : kafka writer
}
type MsgMeid struct {
PlmnID string
EnbID string
RanName string
}
func init() {
}
func NewControl(MsgSendertoSMO *msgx.MsgSender ,AcessDbAgent *dbclient.MsgSender) *Control |
func (c *Control) ReadyCB(data interface{}) {
if c.MsgSendertoSMO == nil {
}
}
func (c *Control) CreateAndRunMsgServer (grpcAddr string) {
svc := service.NewMsgService(c)
ep := endpoint.NewMsgServiceEndpoint(svc)
s := transport.NewMsgServer(ep)
// The gRPC listener mounts the Go kit gRPC server we created.
grpcListener, err := net.Listen("tcp", grpcAddr)
if err != nil {
xapp.Logger.Info("transport", "gRPC", "during", "Listen", "err", err)
os.Exit(1)
}
xapp.Logger.Info("transport", "gRPC", "addr", grpcAddr)
// we add the Go Kit gRPC Interceptor to our gRPC service as it is used by
// the here demonstrated zipkin tracing middleware.
baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor))
msgx.RegisterMsgServiceServer(baseServer, s)
baseServer.Serve(grpcListener)
}
func (c *Control) Run(grpcAddr string) {
//xapp.SetReadyCB(c.ReadyCB, nil)
//xapp.Run(c,grpcAddr)
c.CreateAndRunMsgServer(grpcAddr)
}
func (c *Control) Consume(msg *xapp.MsgParams) (err error) {
xapp.Logger.Debug("Received message type: %s", xapp.RicMessageTypeToName[msg.Mtype])
if c.MsgSendertoSMO == nil {
err = fmt.Errorf("Msg object nil can handle %s", msg.String())
xapp.Logger.Error("%s", err.Error())
return
}
c.CntRecvMsg++
switch msg.Mtype {
case xapp.RIC_O1_REGISTER:
go c.handleXappRegisterRequest(msg)
case xapp.RIC_O1_INIT:
go c.handleSMOInit(msg)
case xapp.RIC_O1_ENABLE:
go c.handleSMOEnableOrDisable(msg)
case xapp.RIC_O1_DISABLE:
go c.handleSMOEnableOrDisable(msg)
default:
xapp.Logger.Info("Unknown Message Type '%d', discarding", msg.Mtype)
}
return
}
//分配xappID
//XappID 合法值 (1...65535),0 为 非法值
func (c *Control) allocXappID(RegMsg msgx.XappRegMsg) (uint32,error, bool){
isRegistered := false
resp,err := c.AccessDbagent.Client.MOITableReadAll(context.Background(),&db.MOITableReadAllRequest{Api: "1",})
if err != nil {
xapp.Logger.Error(err.Error())
return 0 ,err,isRegistered
}
//第一个注册的xapp,直接分配 xappID = 1
if len(resp.MoiTables) == 0 {
return 1,nil,isRegistered
}
for _,r := range resp.MoiTables{
//重复注册,走正常返回流程
if r.XappName == RegMsg.XappName {
isRegistered = true
return r.XappID,nil,isRegistered
}
}
//新注册 ,先按从小到大排序(只有1个表项也可以排序,不会返回错误),然后找到最小的可用xappID
ps := resp.MoiTables
sort.SliceStable(ps, func(i, j int) bool {
return ps[i].XappID < ps[j].XappID
})
var i uint32
//正常的分配为: ps[0].XappID = 1 ,ps[1].XappID = 2 ,ps[2].XappID = 3 ,
//如果出现ps[1].XappID = 3 ,说明原来的XappID = 2 表项已经释放,可以被再次分配使用
for i = 1; i <= 65535;i++ {
if i != ps[i-1].XappID {
// i 值未被使用,可以被分配
return i,nil,isRegistered
}
}
return 0 , fmt.Errorf("分配失败"),isRegistered
}
//新增该xApp的管理对象实例(xApp MOI)表项
func (c *Control) addXappMOI(XappID uint32,RegMsg msgx.XappRegMsg) error {
r := &db.MOITableInsertRequest{}
r.Api = "1"
m := &db.MOITable{}
m.XappID = XappID
m.XappName = RegMsg.XappName
m.XappVer = RegMsg.XappVer
m.Functions = RegMsg.XappFunctions
m.RunningStatus = "inactive"
m.IsReady = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册
return
}
xapp.Logger.Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
RicServices["nricdbagent"] = &RicDbagent
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_RESP,
MsgVer: 1,
XappRequestID: &msgx.XAPPRequestID{
XappID: XappID, //返回分配的XappID
XappInstanceID: RegMsg.Header.XappRequestID.XappInstanceID,
},
},
RicServices: RicServices,
Topic:Topic,
KafkaURL: internal.KafkaURL,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) SendRegisterFailureResp(Client2Xapp *msgx.MsgSender,Cause string,params *xapp.MsgParams) {
xapp.Logger.Info("Send RegisterFailureResp Msg to Xapp: %s\n",xapp.RicMessageTypeToName[params.Mtype])
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_FAILURE,
MsgVer: 1,
},
Cause: Cause,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) handleSMOInit(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOInitMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Init msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
//-------------------------------------------------------------------
// handle from SMO Enable or Disable Xapp Request
//------------------------------------------------------------------
func (c *Control) handleSMOEnableOrDisable(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOEnableMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Enable or Disable msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
| {
endpoint := make(map[uint32]*msgx.KafkaMsgSender)
c := &Control{
//MsgClientToXapp: MsgClientToXapp,
MsgSendertoSMO: MsgSendertoSMO,
//subscriber: subscriber,
AccessDbagent: AcessDbAgent,
Endpoint: endpoint,
}
return c
} | identifier_body |
daemon.go | //go:build !nodaemon
// +build !nodaemon
package main
import (
"bufio"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime/pprof"
"strings"
"github.com/DataDog/zstd"
metricsprom "github.com/ipfs/go-metrics-prometheus"
"github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
"go.opencensus.io/plugin/runmetrics"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"gopkg.in/cheggaaa/pb.v1"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-paramfetch"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/journal/fsjournal"
"github.com/filecoin-project/lotus/lib/httpreader"
"github.com/filecoin-project/lotus/lib/peermgr"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
)
const (
makeGenFlag = "lotus-make-genesis"
preTemplateFlag = "genesis-template"
)
var daemonStopCmd = &cli.Command{
Name: "stop",
Usage: "Stop a running lotus daemon",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetAPI(cctx)
if err != nil {
return err
}
defer closer()
err = api.Shutdown(lcli.ReqContext(cctx))
if err != nil {
return err
}
return nil
},
}
// DaemonCmd is the `go-lotus daemon` command
var DaemonCmd = &cli.Command{
Name: "daemon",
Usage: "Start a lotus daemon process",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "api",
Value: "1234",
},
&cli.StringFlag{
Name: makeGenFlag,
Value: "",
Hidden: true,
},
&cli.StringFlag{
Name: preTemplateFlag,
Hidden: true,
},
&cli.StringFlag{
Name: "import-key",
Usage: "on first run, import a default key from a given file",
Hidden: true,
},
&cli.StringFlag{
Name: "genesis",
Usage: "genesis file to use for first node run",
},
&cli.BoolFlag{
Name: "bootstrap",
Value: true,
},
&cli.StringFlag{
Name: "import-chain",
Usage: "on first run, load chain from given file or url and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file or url",
},
&cli.BoolFlag{
Name: "remove-existing-chain",
Usage: "remove existing chain and splitstore data on a snapshot-import",
},
&cli.BoolFlag{
Name: "halt-after-import",
Usage: "halt the process after importing chain from file",
},
&cli.BoolFlag{
Name: "lite",
Usage: "start lotus in lite mode",
},
&cli.StringFlag{
Name: "pprof",
Usage: "specify name of file for writing cpu profile to",
},
&cli.StringFlag{
Name: "profile",
Usage: "specify type of node",
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringFlag{
Name: "config",
Usage: "specify path of config file to use",
},
// FIXME: This is not the correct place to put this configuration
// option. Ideally it would be part of `config.toml` but at the
// moment that only applies to the node configuration and not outside
// components like the RPC server.
&cli.IntFlag{
Name: "api-max-req-size",
Usage: "maximum API request size accepted by the JSON RPC server",
},
&cli.PathFlag{
Name: "restore",
Usage: "restore from backup file",
},
&cli.PathFlag{
Name: "restore-config",
Usage: "config file to use when restoring from backup",
},
},
Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite")
err := runmetrics.Enable(runmetrics.RunMetricOptions{
EnableCPU: true,
EnableMemory: true,
})
if err != nil {
return xerrors.Errorf("enabling runtime metrics: %w", err)
}
if cctx.Bool("manage-fdlimit") {
if _, _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
if prof := cctx.String("pprof"); prof != "" {
profile, err := os.Create(prof)
if err != nil {
return err
}
if err := pprof.StartCPUProfile(profile); err != nil {
return err
}
defer pprof.StopCPUProfile()
}
var isBootstrapper dtypes.Bootstrapper
switch profile := cctx.String("profile"); profile {
case "bootstrapper":
isBootstrapper = true
case "":
// do nothing
default:
return fmt.Errorf("unrecognized profile type: %q", profile)
}
ctx, _ := tag.New(context.Background(),
tag.Insert(metrics.Version, build.BuildVersion),
tag.Insert(metrics.Commit, build.CurrentCommit),
tag.Insert(metrics.NodeType, "chain"),
)
// Register all metric views
if err = view.Register(
metrics.ChainNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1))
{
dir, err := homedir.Expand(cctx.String("repo"))
if err != nil {
log.Warnw("could not expand repo location", "error", err)
} else {
log.Infof("lotus repo: %s", dir)
}
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
if cctx.String("config") != "" {
r.SetConfigPath(cctx.String("config"))
}
err = r.Init(repo.FullNode)
if err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
freshRepo := err != repo.ErrRepoExists
if !isLite {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
}
var genBytes []byte
if cctx.String("genesis") != "" {
genBytes, err = os.ReadFile(cctx.String("genesis"))
if err != nil {
return xerrors.Errorf("reading genesis: %w", err)
}
} else {
genBytes = build.MaybeGenesis()
}
if cctx.IsSet("restore") {
if !freshRepo {
return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
}
if err := restore(cctx, r); err != nil {
return xerrors.Errorf("restoring from backup: %w", err)
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
fmt.Println("Chain import complete, halting as requested...")
return nil
}
}
genesis := node.Options()
if len(genBytes) > 0 {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genBytes))
}
if cctx.String(makeGenFlag) != "" {
if cctx.String(preTemplateFlag) == "" {
return xerrors.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag)
}
genesis = node.Override(new(modules.Genesis), testing.MakeGenesis(cctx.String(makeGenFlag), cctx.String(preTemplateFlag)))
}
shutdownChan := make(chan struct{})
// If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.RunPeerMgrKey),
node.Unset(new(*peermgr.PeerMgr)),
),
)
if err != nil {
return xerrors.Errorf("initializing node: %w", err)
}
if cctx.String("import-key") != "" {
if err := importKey(ctx, api, cctx.String("import-key")); err != nil {
log.Errorf("importing key failed: %+v", err)
}
}
endpoint, err := r.APIEndpoint()
if err != nil {
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := []jsonrpc.ServerOption{jsonrpc.WithServerErrors(lapi.RPCErrors)}
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
},
}
func importKey(ctx context.Context, api lapi.FullNode, f string) error {
f, err := homedir.Expand(f)
if err != nil {
return err
}
hexdata, err := os.ReadFile(f)
if err != nil {
return err
}
data, err := hex.DecodeString(strings.TrimSpace(string(hexdata)))
if err != nil {
return err
}
var ki types.KeyInfo
if err := json.Unmarshal(data, &ki); err != nil {
return err
}
addr, err := api.WalletImport(ctx, &ki)
if err != nil {
return err
}
if err := api.WalletSetDefault(ctx, addr); err != nil {
return err
}
log.Infof("successfully imported key for %s", addr)
return nil
}
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
rrd, err := httpreader.NewResumableReader(ctx, fname)
if err != nil {
return xerrors.Errorf("fetching chain CAR failed: setting up resumable reader: %w", err)
}
rd = rrd
l = rrd.ContentLength()
} else {
fname, err = homedir.Expand(fname)
if err != nil {
return err
}
fi, err := os.Open(fname)
if err != nil {
return err
}
defer fi.Close() //nolint:errcheck
st, err := os.Stat(fname)
if err != nil {
return err
}
rd = fi
l = st.Size()
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
mds, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournal(lr, journal.EnvDisabledEvents())
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, filcns.Weight, j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
bufr := bufio.NewReaderSize(rd, 1<<20)
header, err := bufr.Peek(4)
if err != nil {
return xerrors.Errorf("peek header: %w", err)
}
bar := pb.New64(l)
br := bar.NewProxyReader(bufr)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
var ir io.Reader = br
if string(header[1:]) == "\xB5\x2F\xFD" { // zstd
zr := zstd.NewReader(br)
defer func() {
if err := zr.Close(); err != nil {
log.Errorw("closing zstd reader", "error", err)
}
}()
ir = zr
}
bar.Start()
ts, err := cst.Import(ctx, ir)
bar.Finish()
if err != nil {
return xerrors.Errorf("importing chain failed: %w", err)
}
if err := cst.FlushValidationCache(ctx); err != nil {
return xerrors.Errorf("flushing validation cache failed: %w", err)
}
gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
if err != nil {
return err
}
err = cst.SetGenesis(ctx, gb.Blocks()[0])
if err != nil {
return err
}
if !snapshot {
shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
if err != nil {
return xerrors.Errorf("failed to construct beacon schedule: %w", err)
}
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
if err != nil {
return err
}
log.Infof("validating imported chain...")
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}
log.Infof("accepting %s as new head", ts.Cids())
if err := cst.ForceHeadSilent(ctx, ts); err != nil {
return err
}
// populate the message index if user has EnableMsgIndex enabled
//
c, err := lr.Config()
if err != nil {
return err
}
cfg, ok := c.(*config.FullNode)
if !ok {
return xerrors.Errorf("invalid config for repo, got: %T", c)
}
if cfg.Index.EnableMsgIndex {
log.Info("populating message index...")
if err := index.PopulateAfterSnapshot(ctx, path.Join(lr.Path(), "sqlite"), cst); err != nil {
return err
}
log.Info("populating message index done")
}
return nil
}
func | (cctx *cli.Context, lr repo.Repo) error {
lockedRepo, err := lr.Lock(repo.FullNode)
if err != nil {
return xerrors.Errorf("error locking repo: %w", err)
}
// Ensure that lockedRepo is closed when this function exits
defer func() {
if closeErr := lockedRepo.Close(); closeErr != nil {
log.Errorf("Error closing the lockedRepo: %v", closeErr)
}
}()
cfg, err := lockedRepo.Config()
if err != nil {
return xerrors.Errorf("error getting config: %w", err)
}
fullNodeConfig, ok := cfg.(*config.FullNode)
if !ok {
return xerrors.Errorf("wrong config type: %T", cfg)
}
if fullNodeConfig.Chainstore.EnableSplitstore {
log.Info("removing splitstore directory...")
err = deleteSplitstoreDir(lockedRepo)
if err != nil {
return xerrors.Errorf("error removing splitstore directory: %w", err)
}
}
// Get the base repo path
repoPath := lockedRepo.Path()
// Construct the path to the chain directory
chainPath := filepath.Join(repoPath, "datastore", "chain")
log.Info("removing chain directory:", chainPath)
err = os.RemoveAll(chainPath)
if err != nil {
return xerrors.Errorf("error removing chain directory: %w", err)
}
log.Info("chain and splitstore data have been removed")
return nil
}
func deleteSplitstoreDir(lr repo.LockedRepo) error {
path, err := lr.SplitstorePath()
if err != nil {
return xerrors.Errorf("error getting splitstore path: %w", err)
}
return os.RemoveAll(path)
}
| removeExistingChain | identifier_name |
daemon.go | //go:build !nodaemon
// +build !nodaemon
package main
import (
"bufio"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime/pprof"
"strings"
"github.com/DataDog/zstd"
metricsprom "github.com/ipfs/go-metrics-prometheus"
"github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
"go.opencensus.io/plugin/runmetrics"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"gopkg.in/cheggaaa/pb.v1"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-paramfetch"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/journal/fsjournal"
"github.com/filecoin-project/lotus/lib/httpreader"
"github.com/filecoin-project/lotus/lib/peermgr"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
)
const (
makeGenFlag = "lotus-make-genesis"
preTemplateFlag = "genesis-template"
)
var daemonStopCmd = &cli.Command{
Name: "stop",
Usage: "Stop a running lotus daemon",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetAPI(cctx)
if err != nil {
return err
}
defer closer()
err = api.Shutdown(lcli.ReqContext(cctx))
if err != nil {
return err
}
return nil
},
}
// DaemonCmd is the `go-lotus daemon` command
var DaemonCmd = &cli.Command{
Name: "daemon",
Usage: "Start a lotus daemon process",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "api",
Value: "1234",
},
&cli.StringFlag{
Name: makeGenFlag,
Value: "",
Hidden: true,
},
&cli.StringFlag{
Name: preTemplateFlag,
Hidden: true,
},
&cli.StringFlag{
Name: "import-key",
Usage: "on first run, import a default key from a given file",
Hidden: true,
},
&cli.StringFlag{
Name: "genesis",
Usage: "genesis file to use for first node run",
},
&cli.BoolFlag{
Name: "bootstrap",
Value: true,
},
&cli.StringFlag{
Name: "import-chain",
Usage: "on first run, load chain from given file or url and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file or url",
},
&cli.BoolFlag{
Name: "remove-existing-chain",
Usage: "remove existing chain and splitstore data on a snapshot-import",
},
&cli.BoolFlag{
Name: "halt-after-import",
Usage: "halt the process after importing chain from file",
},
&cli.BoolFlag{
Name: "lite",
Usage: "start lotus in lite mode",
},
&cli.StringFlag{
Name: "pprof",
Usage: "specify name of file for writing cpu profile to",
},
&cli.StringFlag{
Name: "profile",
Usage: "specify type of node",
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringFlag{
Name: "config",
Usage: "specify path of config file to use",
},
// FIXME: This is not the correct place to put this configuration
// option. Ideally it would be part of `config.toml` but at the
// moment that only applies to the node configuration and not outside
// components like the RPC server.
&cli.IntFlag{
Name: "api-max-req-size",
Usage: "maximum API request size accepted by the JSON RPC server",
},
&cli.PathFlag{
Name: "restore",
Usage: "restore from backup file",
},
&cli.PathFlag{
Name: "restore-config",
Usage: "config file to use when restoring from backup",
},
},
Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite")
err := runmetrics.Enable(runmetrics.RunMetricOptions{
EnableCPU: true,
EnableMemory: true,
})
if err != nil {
return xerrors.Errorf("enabling runtime metrics: %w", err)
}
if cctx.Bool("manage-fdlimit") {
if _, _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
if prof := cctx.String("pprof"); prof != "" {
profile, err := os.Create(prof)
if err != nil {
return err
}
if err := pprof.StartCPUProfile(profile); err != nil {
return err
}
defer pprof.StopCPUProfile()
}
var isBootstrapper dtypes.Bootstrapper
switch profile := cctx.String("profile"); profile {
case "bootstrapper":
isBootstrapper = true
case "":
// do nothing
default:
return fmt.Errorf("unrecognized profile type: %q", profile)
}
ctx, _ := tag.New(context.Background(),
tag.Insert(metrics.Version, build.BuildVersion),
tag.Insert(metrics.Commit, build.CurrentCommit),
tag.Insert(metrics.NodeType, "chain"),
)
// Register all metric views
if err = view.Register(
metrics.ChainNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1))
{
dir, err := homedir.Expand(cctx.String("repo"))
if err != nil {
log.Warnw("could not expand repo location", "error", err)
} else {
log.Infof("lotus repo: %s", dir)
}
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
if cctx.String("config") != "" {
r.SetConfigPath(cctx.String("config"))
}
err = r.Init(repo.FullNode)
if err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
freshRepo := err != repo.ErrRepoExists
if !isLite {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
}
var genBytes []byte
if cctx.String("genesis") != "" {
genBytes, err = os.ReadFile(cctx.String("genesis"))
if err != nil {
return xerrors.Errorf("reading genesis: %w", err)
}
} else {
genBytes = build.MaybeGenesis()
}
if cctx.IsSet("restore") {
if !freshRepo {
return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
}
if err := restore(cctx, r); err != nil {
return xerrors.Errorf("restoring from backup: %w", err)
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
fmt.Println("Chain import complete, halting as requested...")
return nil
}
}
genesis := node.Options()
if len(genBytes) > 0 {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genBytes))
}
if cctx.String(makeGenFlag) != "" {
if cctx.String(preTemplateFlag) == "" {
return xerrors.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag)
}
genesis = node.Override(new(modules.Genesis), testing.MakeGenesis(cctx.String(makeGenFlag), cctx.String(preTemplateFlag)))
}
shutdownChan := make(chan struct{})
// If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.RunPeerMgrKey),
node.Unset(new(*peermgr.PeerMgr)),
),
)
if err != nil {
return xerrors.Errorf("initializing node: %w", err)
}
if cctx.String("import-key") != "" {
if err := importKey(ctx, api, cctx.String("import-key")); err != nil {
log.Errorf("importing key failed: %+v", err)
}
}
endpoint, err := r.APIEndpoint()
if err != nil {
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := []jsonrpc.ServerOption{jsonrpc.WithServerErrors(lapi.RPCErrors)}
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
},
}
func importKey(ctx context.Context, api lapi.FullNode, f string) error {
f, err := homedir.Expand(f)
if err != nil {
return err
}
hexdata, err := os.ReadFile(f)
if err != nil {
return err
}
data, err := hex.DecodeString(strings.TrimSpace(string(hexdata)))
if err != nil {
return err
}
var ki types.KeyInfo
if err := json.Unmarshal(data, &ki); err != nil {
return err
}
addr, err := api.WalletImport(ctx, &ki)
if err != nil {
return err
}
if err := api.WalletSetDefault(ctx, addr); err != nil {
return err
}
log.Infof("successfully imported key for %s", addr)
return nil
}
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
rrd, err := httpreader.NewResumableReader(ctx, fname)
if err != nil {
return xerrors.Errorf("fetching chain CAR failed: setting up resumable reader: %w", err)
}
rd = rrd
l = rrd.ContentLength()
} else {
fname, err = homedir.Expand(fname)
if err != nil {
return err
}
fi, err := os.Open(fname)
if err != nil {
return err
}
defer fi.Close() //nolint:errcheck
st, err := os.Stat(fname)
if err != nil {
return err
}
rd = fi
l = st.Size()
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
mds, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournal(lr, journal.EnvDisabledEvents())
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, filcns.Weight, j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
bufr := bufio.NewReaderSize(rd, 1<<20)
header, err := bufr.Peek(4)
if err != nil {
return xerrors.Errorf("peek header: %w", err)
}
bar := pb.New64(l)
br := bar.NewProxyReader(bufr)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
var ir io.Reader = br
if string(header[1:]) == "\xB5\x2F\xFD" { // zstd
zr := zstd.NewReader(br)
defer func() {
if err := zr.Close(); err != nil |
}()
ir = zr
}
bar.Start()
ts, err := cst.Import(ctx, ir)
bar.Finish()
if err != nil {
return xerrors.Errorf("importing chain failed: %w", err)
}
if err := cst.FlushValidationCache(ctx); err != nil {
return xerrors.Errorf("flushing validation cache failed: %w", err)
}
gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
if err != nil {
return err
}
err = cst.SetGenesis(ctx, gb.Blocks()[0])
if err != nil {
return err
}
if !snapshot {
shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
if err != nil {
return xerrors.Errorf("failed to construct beacon schedule: %w", err)
}
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
if err != nil {
return err
}
log.Infof("validating imported chain...")
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}
log.Infof("accepting %s as new head", ts.Cids())
if err := cst.ForceHeadSilent(ctx, ts); err != nil {
return err
}
// populate the message index if user has EnableMsgIndex enabled
//
c, err := lr.Config()
if err != nil {
return err
}
cfg, ok := c.(*config.FullNode)
if !ok {
return xerrors.Errorf("invalid config for repo, got: %T", c)
}
if cfg.Index.EnableMsgIndex {
log.Info("populating message index...")
if err := index.PopulateAfterSnapshot(ctx, path.Join(lr.Path(), "sqlite"), cst); err != nil {
return err
}
log.Info("populating message index done")
}
return nil
}
func removeExistingChain(cctx *cli.Context, lr repo.Repo) error {
lockedRepo, err := lr.Lock(repo.FullNode)
if err != nil {
return xerrors.Errorf("error locking repo: %w", err)
}
// Ensure that lockedRepo is closed when this function exits
defer func() {
if closeErr := lockedRepo.Close(); closeErr != nil {
log.Errorf("Error closing the lockedRepo: %v", closeErr)
}
}()
cfg, err := lockedRepo.Config()
if err != nil {
return xerrors.Errorf("error getting config: %w", err)
}
fullNodeConfig, ok := cfg.(*config.FullNode)
if !ok {
return xerrors.Errorf("wrong config type: %T", cfg)
}
if fullNodeConfig.Chainstore.EnableSplitstore {
log.Info("removing splitstore directory...")
err = deleteSplitstoreDir(lockedRepo)
if err != nil {
return xerrors.Errorf("error removing splitstore directory: %w", err)
}
}
// Get the base repo path
repoPath := lockedRepo.Path()
// Construct the path to the chain directory
chainPath := filepath.Join(repoPath, "datastore", "chain")
log.Info("removing chain directory:", chainPath)
err = os.RemoveAll(chainPath)
if err != nil {
return xerrors.Errorf("error removing chain directory: %w", err)
}
log.Info("chain and splitstore data have been removed")
return nil
}
func deleteSplitstoreDir(lr repo.LockedRepo) error {
path, err := lr.SplitstorePath()
if err != nil {
return xerrors.Errorf("error getting splitstore path: %w", err)
}
return os.RemoveAll(path)
}
| {
log.Errorw("closing zstd reader", "error", err)
} | conditional_block |
daemon.go | //go:build !nodaemon
// +build !nodaemon
package main
import (
"bufio"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime/pprof"
"strings"
"github.com/DataDog/zstd"
metricsprom "github.com/ipfs/go-metrics-prometheus"
"github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
"go.opencensus.io/plugin/runmetrics"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"gopkg.in/cheggaaa/pb.v1"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-paramfetch"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/journal/fsjournal"
"github.com/filecoin-project/lotus/lib/httpreader"
"github.com/filecoin-project/lotus/lib/peermgr"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
)
const (
makeGenFlag = "lotus-make-genesis"
preTemplateFlag = "genesis-template"
)
var daemonStopCmd = &cli.Command{
Name: "stop",
Usage: "Stop a running lotus daemon",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetAPI(cctx)
if err != nil {
return err
}
defer closer()
err = api.Shutdown(lcli.ReqContext(cctx))
if err != nil {
return err
}
return nil
},
}
// DaemonCmd is the `go-lotus daemon` command
var DaemonCmd = &cli.Command{
Name: "daemon",
Usage: "Start a lotus daemon process",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "api",
Value: "1234",
},
&cli.StringFlag{
Name: makeGenFlag,
Value: "",
Hidden: true,
},
&cli.StringFlag{
Name: preTemplateFlag,
Hidden: true,
},
&cli.StringFlag{
Name: "import-key",
Usage: "on first run, import a default key from a given file",
Hidden: true,
},
&cli.StringFlag{
Name: "genesis",
Usage: "genesis file to use for first node run",
},
&cli.BoolFlag{
Name: "bootstrap",
Value: true,
},
&cli.StringFlag{
Name: "import-chain",
Usage: "on first run, load chain from given file or url and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file or url",
},
&cli.BoolFlag{
Name: "remove-existing-chain",
Usage: "remove existing chain and splitstore data on a snapshot-import",
},
&cli.BoolFlag{
Name: "halt-after-import",
Usage: "halt the process after importing chain from file",
},
&cli.BoolFlag{
Name: "lite",
Usage: "start lotus in lite mode",
},
&cli.StringFlag{
Name: "pprof",
Usage: "specify name of file for writing cpu profile to",
},
&cli.StringFlag{
Name: "profile",
Usage: "specify type of node",
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringFlag{
Name: "config",
Usage: "specify path of config file to use",
},
// FIXME: This is not the correct place to put this configuration
// option. Ideally it would be part of `config.toml` but at the
// moment that only applies to the node configuration and not outside
// components like the RPC server.
&cli.IntFlag{
Name: "api-max-req-size",
Usage: "maximum API request size accepted by the JSON RPC server",
},
&cli.PathFlag{
Name: "restore",
Usage: "restore from backup file",
},
&cli.PathFlag{
Name: "restore-config",
Usage: "config file to use when restoring from backup",
},
},
Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite")
err := runmetrics.Enable(runmetrics.RunMetricOptions{
EnableCPU: true,
EnableMemory: true,
})
if err != nil {
return xerrors.Errorf("enabling runtime metrics: %w", err)
}
if cctx.Bool("manage-fdlimit") {
if _, _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
if prof := cctx.String("pprof"); prof != "" {
profile, err := os.Create(prof)
if err != nil {
return err
}
if err := pprof.StartCPUProfile(profile); err != nil {
return err
}
defer pprof.StopCPUProfile()
}
var isBootstrapper dtypes.Bootstrapper
switch profile := cctx.String("profile"); profile {
case "bootstrapper":
isBootstrapper = true
case "":
// do nothing
default:
return fmt.Errorf("unrecognized profile type: %q", profile)
}
ctx, _ := tag.New(context.Background(),
tag.Insert(metrics.Version, build.BuildVersion),
tag.Insert(metrics.Commit, build.CurrentCommit),
tag.Insert(metrics.NodeType, "chain"),
)
// Register all metric views
if err = view.Register(
metrics.ChainNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1)) |
{
dir, err := homedir.Expand(cctx.String("repo"))
if err != nil {
log.Warnw("could not expand repo location", "error", err)
} else {
log.Infof("lotus repo: %s", dir)
}
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
if cctx.String("config") != "" {
r.SetConfigPath(cctx.String("config"))
}
err = r.Init(repo.FullNode)
if err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
freshRepo := err != repo.ErrRepoExists
if !isLite {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
}
var genBytes []byte
if cctx.String("genesis") != "" {
genBytes, err = os.ReadFile(cctx.String("genesis"))
if err != nil {
return xerrors.Errorf("reading genesis: %w", err)
}
} else {
genBytes = build.MaybeGenesis()
}
if cctx.IsSet("restore") {
if !freshRepo {
return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
}
if err := restore(cctx, r); err != nil {
return xerrors.Errorf("restoring from backup: %w", err)
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
fmt.Println("Chain import complete, halting as requested...")
return nil
}
}
genesis := node.Options()
if len(genBytes) > 0 {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genBytes))
}
if cctx.String(makeGenFlag) != "" {
if cctx.String(preTemplateFlag) == "" {
return xerrors.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag)
}
genesis = node.Override(new(modules.Genesis), testing.MakeGenesis(cctx.String(makeGenFlag), cctx.String(preTemplateFlag)))
}
shutdownChan := make(chan struct{})
// If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.RunPeerMgrKey),
node.Unset(new(*peermgr.PeerMgr)),
),
)
if err != nil {
return xerrors.Errorf("initializing node: %w", err)
}
if cctx.String("import-key") != "" {
if err := importKey(ctx, api, cctx.String("import-key")); err != nil {
log.Errorf("importing key failed: %+v", err)
}
}
endpoint, err := r.APIEndpoint()
if err != nil {
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := []jsonrpc.ServerOption{jsonrpc.WithServerErrors(lapi.RPCErrors)}
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
},
}
func importKey(ctx context.Context, api lapi.FullNode, f string) error {
f, err := homedir.Expand(f)
if err != nil {
return err
}
hexdata, err := os.ReadFile(f)
if err != nil {
return err
}
data, err := hex.DecodeString(strings.TrimSpace(string(hexdata)))
if err != nil {
return err
}
var ki types.KeyInfo
if err := json.Unmarshal(data, &ki); err != nil {
return err
}
addr, err := api.WalletImport(ctx, &ki)
if err != nil {
return err
}
if err := api.WalletSetDefault(ctx, addr); err != nil {
return err
}
log.Infof("successfully imported key for %s", addr)
return nil
}
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
rrd, err := httpreader.NewResumableReader(ctx, fname)
if err != nil {
return xerrors.Errorf("fetching chain CAR failed: setting up resumable reader: %w", err)
}
rd = rrd
l = rrd.ContentLength()
} else {
fname, err = homedir.Expand(fname)
if err != nil {
return err
}
fi, err := os.Open(fname)
if err != nil {
return err
}
defer fi.Close() //nolint:errcheck
st, err := os.Stat(fname)
if err != nil {
return err
}
rd = fi
l = st.Size()
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
mds, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournal(lr, journal.EnvDisabledEvents())
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, filcns.Weight, j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
bufr := bufio.NewReaderSize(rd, 1<<20)
header, err := bufr.Peek(4)
if err != nil {
return xerrors.Errorf("peek header: %w", err)
}
bar := pb.New64(l)
br := bar.NewProxyReader(bufr)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
var ir io.Reader = br
if string(header[1:]) == "\xB5\x2F\xFD" { // zstd
zr := zstd.NewReader(br)
defer func() {
if err := zr.Close(); err != nil {
log.Errorw("closing zstd reader", "error", err)
}
}()
ir = zr
}
bar.Start()
ts, err := cst.Import(ctx, ir)
bar.Finish()
if err != nil {
return xerrors.Errorf("importing chain failed: %w", err)
}
if err := cst.FlushValidationCache(ctx); err != nil {
return xerrors.Errorf("flushing validation cache failed: %w", err)
}
gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
if err != nil {
return err
}
err = cst.SetGenesis(ctx, gb.Blocks()[0])
if err != nil {
return err
}
if !snapshot {
shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
if err != nil {
return xerrors.Errorf("failed to construct beacon schedule: %w", err)
}
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
if err != nil {
return err
}
log.Infof("validating imported chain...")
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}
log.Infof("accepting %s as new head", ts.Cids())
if err := cst.ForceHeadSilent(ctx, ts); err != nil {
return err
}
// populate the message index if user has EnableMsgIndex enabled
//
c, err := lr.Config()
if err != nil {
return err
}
cfg, ok := c.(*config.FullNode)
if !ok {
return xerrors.Errorf("invalid config for repo, got: %T", c)
}
if cfg.Index.EnableMsgIndex {
log.Info("populating message index...")
if err := index.PopulateAfterSnapshot(ctx, path.Join(lr.Path(), "sqlite"), cst); err != nil {
return err
}
log.Info("populating message index done")
}
return nil
}
func removeExistingChain(cctx *cli.Context, lr repo.Repo) error {
lockedRepo, err := lr.Lock(repo.FullNode)
if err != nil {
return xerrors.Errorf("error locking repo: %w", err)
}
// Ensure that lockedRepo is closed when this function exits
defer func() {
if closeErr := lockedRepo.Close(); closeErr != nil {
log.Errorf("Error closing the lockedRepo: %v", closeErr)
}
}()
cfg, err := lockedRepo.Config()
if err != nil {
return xerrors.Errorf("error getting config: %w", err)
}
fullNodeConfig, ok := cfg.(*config.FullNode)
if !ok {
return xerrors.Errorf("wrong config type: %T", cfg)
}
if fullNodeConfig.Chainstore.EnableSplitstore {
log.Info("removing splitstore directory...")
err = deleteSplitstoreDir(lockedRepo)
if err != nil {
return xerrors.Errorf("error removing splitstore directory: %w", err)
}
}
// Get the base repo path
repoPath := lockedRepo.Path()
// Construct the path to the chain directory
chainPath := filepath.Join(repoPath, "datastore", "chain")
log.Info("removing chain directory:", chainPath)
err = os.RemoveAll(chainPath)
if err != nil {
return xerrors.Errorf("error removing chain directory: %w", err)
}
log.Info("chain and splitstore data have been removed")
return nil
}
func deleteSplitstoreDir(lr repo.LockedRepo) error {
path, err := lr.SplitstorePath()
if err != nil {
return xerrors.Errorf("error getting splitstore path: %w", err)
}
return os.RemoveAll(path)
} | random_line_split | |
daemon.go | //go:build !nodaemon
// +build !nodaemon
package main
import (
"bufio"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime/pprof"
"strings"
"github.com/DataDog/zstd"
metricsprom "github.com/ipfs/go-metrics-prometheus"
"github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
"go.opencensus.io/plugin/runmetrics"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"gopkg.in/cheggaaa/pb.v1"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-paramfetch"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/journal/fsjournal"
"github.com/filecoin-project/lotus/lib/httpreader"
"github.com/filecoin-project/lotus/lib/peermgr"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
)
const (
makeGenFlag = "lotus-make-genesis"
preTemplateFlag = "genesis-template"
)
var daemonStopCmd = &cli.Command{
Name: "stop",
Usage: "Stop a running lotus daemon",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetAPI(cctx)
if err != nil {
return err
}
defer closer()
err = api.Shutdown(lcli.ReqContext(cctx))
if err != nil {
return err
}
return nil
},
}
// DaemonCmd is the `go-lotus daemon` command
var DaemonCmd = &cli.Command{
Name: "daemon",
Usage: "Start a lotus daemon process",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "api",
Value: "1234",
},
&cli.StringFlag{
Name: makeGenFlag,
Value: "",
Hidden: true,
},
&cli.StringFlag{
Name: preTemplateFlag,
Hidden: true,
},
&cli.StringFlag{
Name: "import-key",
Usage: "on first run, import a default key from a given file",
Hidden: true,
},
&cli.StringFlag{
Name: "genesis",
Usage: "genesis file to use for first node run",
},
&cli.BoolFlag{
Name: "bootstrap",
Value: true,
},
&cli.StringFlag{
Name: "import-chain",
Usage: "on first run, load chain from given file or url and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file or url",
},
&cli.BoolFlag{
Name: "remove-existing-chain",
Usage: "remove existing chain and splitstore data on a snapshot-import",
},
&cli.BoolFlag{
Name: "halt-after-import",
Usage: "halt the process after importing chain from file",
},
&cli.BoolFlag{
Name: "lite",
Usage: "start lotus in lite mode",
},
&cli.StringFlag{
Name: "pprof",
Usage: "specify name of file for writing cpu profile to",
},
&cli.StringFlag{
Name: "profile",
Usage: "specify type of node",
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringFlag{
Name: "config",
Usage: "specify path of config file to use",
},
// FIXME: This is not the correct place to put this configuration
// option. Ideally it would be part of `config.toml` but at the
// moment that only applies to the node configuration and not outside
// components like the RPC server.
&cli.IntFlag{
Name: "api-max-req-size",
Usage: "maximum API request size accepted by the JSON RPC server",
},
&cli.PathFlag{
Name: "restore",
Usage: "restore from backup file",
},
&cli.PathFlag{
Name: "restore-config",
Usage: "config file to use when restoring from backup",
},
},
Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite")
err := runmetrics.Enable(runmetrics.RunMetricOptions{
EnableCPU: true,
EnableMemory: true,
})
if err != nil {
return xerrors.Errorf("enabling runtime metrics: %w", err)
}
if cctx.Bool("manage-fdlimit") {
if _, _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
if prof := cctx.String("pprof"); prof != "" {
profile, err := os.Create(prof)
if err != nil {
return err
}
if err := pprof.StartCPUProfile(profile); err != nil {
return err
}
defer pprof.StopCPUProfile()
}
var isBootstrapper dtypes.Bootstrapper
switch profile := cctx.String("profile"); profile {
case "bootstrapper":
isBootstrapper = true
case "":
// do nothing
default:
return fmt.Errorf("unrecognized profile type: %q", profile)
}
ctx, _ := tag.New(context.Background(),
tag.Insert(metrics.Version, build.BuildVersion),
tag.Insert(metrics.Commit, build.CurrentCommit),
tag.Insert(metrics.NodeType, "chain"),
)
// Register all metric views
if err = view.Register(
metrics.ChainNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1))
{
dir, err := homedir.Expand(cctx.String("repo"))
if err != nil {
log.Warnw("could not expand repo location", "error", err)
} else {
log.Infof("lotus repo: %s", dir)
}
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
if cctx.String("config") != "" {
r.SetConfigPath(cctx.String("config"))
}
err = r.Init(repo.FullNode)
if err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
freshRepo := err != repo.ErrRepoExists
if !isLite {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
}
var genBytes []byte
if cctx.String("genesis") != "" {
genBytes, err = os.ReadFile(cctx.String("genesis"))
if err != nil {
return xerrors.Errorf("reading genesis: %w", err)
}
} else {
genBytes = build.MaybeGenesis()
}
if cctx.IsSet("restore") {
if !freshRepo {
return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
}
if err := restore(cctx, r); err != nil {
return xerrors.Errorf("restoring from backup: %w", err)
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
fmt.Println("Chain import complete, halting as requested...")
return nil
}
}
genesis := node.Options()
if len(genBytes) > 0 {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genBytes))
}
if cctx.String(makeGenFlag) != "" {
if cctx.String(preTemplateFlag) == "" {
return xerrors.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag)
}
genesis = node.Override(new(modules.Genesis), testing.MakeGenesis(cctx.String(makeGenFlag), cctx.String(preTemplateFlag)))
}
shutdownChan := make(chan struct{})
// If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.RunPeerMgrKey),
node.Unset(new(*peermgr.PeerMgr)),
),
)
if err != nil {
return xerrors.Errorf("initializing node: %w", err)
}
if cctx.String("import-key") != "" {
if err := importKey(ctx, api, cctx.String("import-key")); err != nil {
log.Errorf("importing key failed: %+v", err)
}
}
endpoint, err := r.APIEndpoint()
if err != nil {
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := []jsonrpc.ServerOption{jsonrpc.WithServerErrors(lapi.RPCErrors)}
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
},
}
func importKey(ctx context.Context, api lapi.FullNode, f string) error |
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
rrd, err := httpreader.NewResumableReader(ctx, fname)
if err != nil {
return xerrors.Errorf("fetching chain CAR failed: setting up resumable reader: %w", err)
}
rd = rrd
l = rrd.ContentLength()
} else {
fname, err = homedir.Expand(fname)
if err != nil {
return err
}
fi, err := os.Open(fname)
if err != nil {
return err
}
defer fi.Close() //nolint:errcheck
st, err := os.Stat(fname)
if err != nil {
return err
}
rd = fi
l = st.Size()
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
mds, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournal(lr, journal.EnvDisabledEvents())
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, filcns.Weight, j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
bufr := bufio.NewReaderSize(rd, 1<<20)
header, err := bufr.Peek(4)
if err != nil {
return xerrors.Errorf("peek header: %w", err)
}
bar := pb.New64(l)
br := bar.NewProxyReader(bufr)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
var ir io.Reader = br
if string(header[1:]) == "\xB5\x2F\xFD" { // zstd
zr := zstd.NewReader(br)
defer func() {
if err := zr.Close(); err != nil {
log.Errorw("closing zstd reader", "error", err)
}
}()
ir = zr
}
bar.Start()
ts, err := cst.Import(ctx, ir)
bar.Finish()
if err != nil {
return xerrors.Errorf("importing chain failed: %w", err)
}
if err := cst.FlushValidationCache(ctx); err != nil {
return xerrors.Errorf("flushing validation cache failed: %w", err)
}
gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
if err != nil {
return err
}
err = cst.SetGenesis(ctx, gb.Blocks()[0])
if err != nil {
return err
}
if !snapshot {
shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
if err != nil {
return xerrors.Errorf("failed to construct beacon schedule: %w", err)
}
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
if err != nil {
return err
}
log.Infof("validating imported chain...")
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}
log.Infof("accepting %s as new head", ts.Cids())
if err := cst.ForceHeadSilent(ctx, ts); err != nil {
return err
}
// populate the message index if user has EnableMsgIndex enabled
//
c, err := lr.Config()
if err != nil {
return err
}
cfg, ok := c.(*config.FullNode)
if !ok {
return xerrors.Errorf("invalid config for repo, got: %T", c)
}
if cfg.Index.EnableMsgIndex {
log.Info("populating message index...")
if err := index.PopulateAfterSnapshot(ctx, path.Join(lr.Path(), "sqlite"), cst); err != nil {
return err
}
log.Info("populating message index done")
}
return nil
}
func removeExistingChain(cctx *cli.Context, lr repo.Repo) error {
lockedRepo, err := lr.Lock(repo.FullNode)
if err != nil {
return xerrors.Errorf("error locking repo: %w", err)
}
// Ensure that lockedRepo is closed when this function exits
defer func() {
if closeErr := lockedRepo.Close(); closeErr != nil {
log.Errorf("Error closing the lockedRepo: %v", closeErr)
}
}()
cfg, err := lockedRepo.Config()
if err != nil {
return xerrors.Errorf("error getting config: %w", err)
}
fullNodeConfig, ok := cfg.(*config.FullNode)
if !ok {
return xerrors.Errorf("wrong config type: %T", cfg)
}
if fullNodeConfig.Chainstore.EnableSplitstore {
log.Info("removing splitstore directory...")
err = deleteSplitstoreDir(lockedRepo)
if err != nil {
return xerrors.Errorf("error removing splitstore directory: %w", err)
}
}
// Get the base repo path
repoPath := lockedRepo.Path()
// Construct the path to the chain directory
chainPath := filepath.Join(repoPath, "datastore", "chain")
log.Info("removing chain directory:", chainPath)
err = os.RemoveAll(chainPath)
if err != nil {
return xerrors.Errorf("error removing chain directory: %w", err)
}
log.Info("chain and splitstore data have been removed")
return nil
}
func deleteSplitstoreDir(lr repo.LockedRepo) error {
path, err := lr.SplitstorePath()
if err != nil {
return xerrors.Errorf("error getting splitstore path: %w", err)
}
return os.RemoveAll(path)
}
| {
f, err := homedir.Expand(f)
if err != nil {
return err
}
hexdata, err := os.ReadFile(f)
if err != nil {
return err
}
data, err := hex.DecodeString(strings.TrimSpace(string(hexdata)))
if err != nil {
return err
}
var ki types.KeyInfo
if err := json.Unmarshal(data, &ki); err != nil {
return err
}
addr, err := api.WalletImport(ctx, &ki)
if err != nil {
return err
}
if err := api.WalletSetDefault(ctx, addr); err != nil {
return err
}
log.Infof("successfully imported key for %s", addr)
return nil
} | identifier_body |
store.rs | /*! Bit management
The `BitStore` trait defines constants and associated functions suitable for
managing the bit patterns of a fundamental, and is the constraint for the
storage type of the data structures of the rest of the crate.
The other types in this module provide stronger rules about how indices map to
concrete bits in fundamental elements. They are implementation details, and are
not exported in the prelude.
!*/
| use core::{
cmp::Eq,
fmt::{
Binary,
Debug,
Display,
LowerHex,
UpperHex,
},
mem::size_of,
ops::{
BitAnd,
BitAndAssign,
BitOrAssign,
Not,
Shl,
ShlAssign,
Shr,
ShrAssign,
},
};
#[cfg(feature = "atomic")]
use core::sync::atomic::{
self,
Ordering::Relaxed,
};
#[cfg(not(feature = "atomic"))]
use core::cell::Cell;
/** Generalizes over the fundamental types for use in `bitvec` data structures.
This trait must only be implemented on unsigned integer primitives with full
alignment. It cannot be implemented on `u128` on any architecture, or on `u64`
on 32-bit systems.
The `Sealed` supertrait ensures that this can only be implemented locally, and
will never be implemented by downstream crates on new types.
**/
pub trait BitStore:
// Forbid external implementation
Sealed
+ Binary
// Element-wise binary manipulation
+ BitAnd<Self, Output=Self>
+ BitAndAssign<Self>
+ BitOrAssign<Self>
// Permit indexing into a generic array
+ Copy
+ Debug
+ Default
+ Display
// Permit testing a value against 1 in `get()`.
+ Eq
// Rust treats numeric literals in code as vaguely typed and does not make
// them concrete until long after trait expansion, so this enables building
// a concrete Self value from a numeric literal.
+ From<u8>
// Permit extending into a `u64`.
+ Into<u64>
+ LowerHex
+ Not<Output=Self>
+ Send
+ Shl<u8, Output=Self>
+ ShlAssign<u8>
+ Shr<u8, Output=Self>
+ ShrAssign<u8>
// Allow direct access to a concrete implementor type.
+ Sized
+ Sync
+ UpperHex
{
/// The width, in bits, of this type.
const BITS: u8 = size_of::<Self>() as u8 * 8;
/// The number of bits required to index a bit inside the type. This is
/// always log<sub>2</sub> of the type’s bit width.
const INDX: u8 = Self::BITS.trailing_zeros() as u8;
/// The bitmask to turn an arbitrary number into a bit index. Bit indices
/// are always stored in the lowest bits of an index value.
const MASK: u8 = Self::BITS - 1;
/// Name of the implementing type. This is only necessary until the compiler
/// stabilizes `type_name()`.
const TYPENAME: &'static str;
/// Shared-mutability wrapper type used to safely mutate aliased data.
///
/// Within `&/mut BitSlice` contexts, the `Nucleus` type **must** be used to
/// ensure correctly-synchronized access to memory elements that may have
/// aliased mutable access. When a codepath knows that it has full ownership
/// of a memory element of `Self`, and no other codepath may observe, much
/// less modify, it, then that codepath may skip the `Nucleus` type and use
/// plain accessors.
type Nucleus: BitAccess<Self>;
/// Sets a specific bit in an element to a given value.
///
/// # Safety
///
/// This method cannot be called from within a `&mut BitSlice` context; it
/// may only be called by construction of an `&mut Self` reference from a
/// `Self` element directly.
///
/// # Parameters
///
/// - `&mut self`
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be set according to `value`.
/// - `value`: A Boolean value, which sets the bit on `true` and clears it
/// on `false`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
#[inline(always)]
fn set<C>(&mut self, place: BitIdx<Self>, value: bool)
where C: Cursor {
let mask = *C::mask(place);
if value {
*self |= mask;
}
else {
*self &= !mask;
}
}
/// Gets a specific bit in an element.
///
/// # Safety
///
/// This method cannot be called from within a `&BitSlice` context; it may
/// only be called by construction of an `&Self` reference from a `Self`
/// element directly.
///
/// # Parameters
///
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<Self>) -> bool
where C: Cursor {
*self & *C::mask(place) != Self::from(0)
}
/// Counts how many bits in `self` are set to `1`.
///
/// This zero-extends `self` to `u64`, and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `1`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_ones(&0u8), 0);
/// assert_eq!(BitStore::count_ones(&128u8), 1);
/// assert_eq!(BitStore::count_ones(&192u8), 2);
/// assert_eq!(BitStore::count_ones(&224u8), 3);
/// assert_eq!(BitStore::count_ones(&240u8), 4);
/// assert_eq!(BitStore::count_ones(&248u8), 5);
/// assert_eq!(BitStore::count_ones(&252u8), 6);
/// assert_eq!(BitStore::count_ones(&254u8), 7);
/// assert_eq!(BitStore::count_ones(&255u8), 8);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_ones(&self) -> usize {
u64::count_ones((*self).into()) as usize
}
/// Counts how many bits in `self` are set to `0`.
///
/// This inverts `self`, so all `0` bits are `1` and all `1` bits are `0`,
/// then zero-extends `self` to `u64` and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `0`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_zeros(&0u8), 8);
/// assert_eq!(BitStore::count_zeros(&1u8), 7);
/// assert_eq!(BitStore::count_zeros(&3u8), 6);
/// assert_eq!(BitStore::count_zeros(&7u8), 5);
/// assert_eq!(BitStore::count_zeros(&15u8), 4);
/// assert_eq!(BitStore::count_zeros(&31u8), 3);
/// assert_eq!(BitStore::count_zeros(&63u8), 2);
/// assert_eq!(BitStore::count_zeros(&127u8), 1);
/// assert_eq!(BitStore::count_zeros(&255u8), 0);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_zeros(&self) -> usize {
// invert (0 becomes 1, 1 becomes 0), zero-extend, count ones
u64::count_ones((!*self).into()) as usize
}
/// Extends a single bit to fill the entire element.
///
/// # Parameters
///
/// - `bit`: The bit to extend.
///
/// # Returns
///
/// An element with all bits set to the input.
#[inline]
fn bits(bit: bool) -> Self {
if bit {
!Self::from(0)
}
else {
Self::from(0)
}
}
}
/** Marker trait to seal `BitStore` against downstream implementation.
This trait is public in the module, so that other modules in the crate can use
it, but so long as it is not exported by the crate root and this module is
private, this trait effectively forbids downstream implementation of the
`BitStore` trait.
**/
#[doc(hidden)]
pub trait Sealed {}
macro_rules! store {
( $( $t:ty , $a:ty $( ; )? );* ) => { $(
impl Sealed for $t {}
impl BitStore for $t {
const TYPENAME: &'static str = stringify!($t);
#[cfg(feature = "atomic")]
type Nucleus = $a;
#[cfg(not(feature = "atomic"))]
type Nucleus = Cell<Self>;
}
)* };
}
store![
u8, atomic::AtomicU8;
u16, atomic::AtomicU16;
u32, atomic::AtomicU32;
];
#[cfg(target_pointer_width = "64")]
store![u64, atomic::AtomicU64];
/// Type alias to the CPU word element, `u32`.
#[cfg(target_pointer_width = "32")]
pub type Word = u32;
/// Type alias to the CPU word element, `u64`.
#[cfg(target_pointer_width = "64")]
pub type Word = u64;
/** Common interface for atomic and cellular shared-mutability wrappers.
`&/mut BitSlice` contexts must use the `BitStore::Nucleus` type for all
reference production, and must route through this trait in order to access the
underlying memory. In multi-threaded contexts, this trait enforces that all
access is synchronized through atomic accesses; in single-threaded contexts,
this trait solely permits modification of an aliased element.
It is implemented on the atomic type wrappers when the `atomic` feature is set,
and implemented on the `Cell` type wrapper when the feature is missing. Coupled
with the `Send` implementation on `BitSlice`
**/
pub trait BitAccess<T>: Sized
where T: BitStore {
/// Sets a specific bit in an element low.
///
/// `BitAccess::set` calls this when its `value` is `false`; it
/// unconditionally writes a `0` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn clear_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Sets a specific bit in an element high.
///
/// `BitAccess::set` calls this when its `value` is `true`; it
/// unconditionally writes a `1` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn set_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Inverts a specific bit in an element.
///
/// This is the driver of `BitStore::invert_bit`, and has the same API and
/// documented behavior.
fn invert_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Gets a specific bit in an element.
///
/// # Parameters
///
/// - `&self`: A shared reference to a maybe-mutable element. This uses the
/// trait `load` function to ensure correct reads from memory.
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<T>) -> bool
where C: Cursor {
self.load() & *C::mask(place) != T::from(0)
}
/// Sets a specific bit in an element to a given value.
///
/// This is the driver of `BitStore::set`, and has the same API and
/// documented behavior.
#[inline(always)]
fn set<C>(&self, place: BitIdx<T>, value: bool)
where C: Cursor {
if value {
self.set_bit::<C>(place);
}
else {
self.clear_bit::<C>(place);
}
}
/// Removes the shared-mutability wrapper, producing a read reference to the
/// inner type.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// A read reference to the wrapped type.
///
/// # Safety
///
/// As this removes mutability, it is strictly safe.
#[inline(always)]
fn base(&self) -> &T {
unsafe { &*(self as *const Self as *const T) }
}
/// Transforms a reference of `&[T::Nucleus]` into `&mut [T]`.
///
/// # Safety
///
/// This function is undefined when the `this` slice referent has aliasing
/// pointers. It must only ever be called when the slice referent is
/// guaranteed to have no aliases, but mutability has been removed from the
/// type system at an earlier point in the call stack.
///
/// # Parameters
///
/// - `this`: A slice reference to some shared-mutability reference type.
///
/// # Returns
///
/// A mutable reference to the wrapped interior type of the `this` referent.
#[inline(always)]
unsafe fn base_slice_mut(this: &[Self]) -> &mut [T] {
&mut *(this as *const [Self] as *const [T] as *mut [T])
}
/// Performs a synchronized load on an unsynchronized reference.
///
/// Atomic implementors must ensure that the load is well synchronized, and
/// cell implementors can just read. Each implementor must be strictly gated
/// on the `atomic` feature flag.
fn load(&self) -> T;
}
/* FIXME(myrrlyn): When the `radium` crate publishes generic traits, erase the
implementations currently in use and enable the generic implementation below:
impl<T, R> BitAccess<T> for R
where T: BitStore, R: RadiumBits<T> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
}
*/
#[cfg(feature = "atomic")] fn _atom() {
impl BitAccess<u8> for atomic::AtomicU8 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u8 {
self.load(Relaxed)
}
}
impl BitAccess<u16> for atomic::AtomicU16 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u16 {
self.load(Relaxed)
}
}
impl BitAccess<u32> for atomic::AtomicU32 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u32 {
self.load(Relaxed)
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for atomic::AtomicU64 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u64 {
self.load(Relaxed)
}
}
}
#[cfg(not(feature = "atomic"))] fn _cell() {
impl BitAccess<u8> for Cell<u8> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u8 {
self.get()
}
}
impl BitAccess<u16> for Cell<u16> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u16 {
self.get()
}
}
impl BitAccess<u32> for Cell<u32> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u32 {
self.get()
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for Cell<u64> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u64 {
self.get()
}
}
} | use crate::{
cursor::Cursor,
indices::BitIdx,
};
| random_line_split |
store.rs | /*! Bit management
The `BitStore` trait defines constants and associated functions suitable for
managing the bit patterns of a fundamental, and is the constraint for the
storage type of the data structures of the rest of the crate.
The other types in this module provide stronger rules about how indices map to
concrete bits in fundamental elements. They are implementation details, and are
not exported in the prelude.
!*/
use crate::{
cursor::Cursor,
indices::BitIdx,
};
use core::{
cmp::Eq,
fmt::{
Binary,
Debug,
Display,
LowerHex,
UpperHex,
},
mem::size_of,
ops::{
BitAnd,
BitAndAssign,
BitOrAssign,
Not,
Shl,
ShlAssign,
Shr,
ShrAssign,
},
};
#[cfg(feature = "atomic")]
use core::sync::atomic::{
self,
Ordering::Relaxed,
};
#[cfg(not(feature = "atomic"))]
use core::cell::Cell;
/** Generalizes over the fundamental types for use in `bitvec` data structures.
This trait must only be implemented on unsigned integer primitives with full
alignment. It cannot be implemented on `u128` on any architecture, or on `u64`
on 32-bit systems.
The `Sealed` supertrait ensures that this can only be implemented locally, and
will never be implemented by downstream crates on new types.
**/
pub trait BitStore:
// Forbid external implementation
Sealed
+ Binary
// Element-wise binary manipulation
+ BitAnd<Self, Output=Self>
+ BitAndAssign<Self>
+ BitOrAssign<Self>
// Permit indexing into a generic array
+ Copy
+ Debug
+ Default
+ Display
// Permit testing a value against 1 in `get()`.
+ Eq
// Rust treats numeric literals in code as vaguely typed and does not make
// them concrete until long after trait expansion, so this enables building
// a concrete Self value from a numeric literal.
+ From<u8>
// Permit extending into a `u64`.
+ Into<u64>
+ LowerHex
+ Not<Output=Self>
+ Send
+ Shl<u8, Output=Self>
+ ShlAssign<u8>
+ Shr<u8, Output=Self>
+ ShrAssign<u8>
// Allow direct access to a concrete implementor type.
+ Sized
+ Sync
+ UpperHex
{
/// The width, in bits, of this type.
const BITS: u8 = size_of::<Self>() as u8 * 8;
/// The number of bits required to index a bit inside the type. This is
/// always log<sub>2</sub> of the type’s bit width.
const INDX: u8 = Self::BITS.trailing_zeros() as u8;
/// The bitmask to turn an arbitrary number into a bit index. Bit indices
/// are always stored in the lowest bits of an index value.
const MASK: u8 = Self::BITS - 1;
/// Name of the implementing type. This is only necessary until the compiler
/// stabilizes `type_name()`.
const TYPENAME: &'static str;
/// Shared-mutability wrapper type used to safely mutate aliased data.
///
/// Within `&/mut BitSlice` contexts, the `Nucleus` type **must** be used to
/// ensure correctly-synchronized access to memory elements that may have
/// aliased mutable access. When a codepath knows that it has full ownership
/// of a memory element of `Self`, and no other codepath may observe, much
/// less modify, it, then that codepath may skip the `Nucleus` type and use
/// plain accessors.
type Nucleus: BitAccess<Self>;
/// Sets a specific bit in an element to a given value.
///
/// # Safety
///
/// This method cannot be called from within a `&mut BitSlice` context; it
/// may only be called by construction of an `&mut Self` reference from a
/// `Self` element directly.
///
/// # Parameters
///
/// - `&mut self`
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be set according to `value`.
/// - `value`: A Boolean value, which sets the bit on `true` and clears it
/// on `false`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
#[inline(always)]
fn set<C>(&mut self, place: BitIdx<Self>, value: bool)
where C: Cursor {
let mask = *C::mask(place);
if value {
*self |= mask;
}
else {
*self &= !mask;
}
}
/// Gets a specific bit in an element.
///
/// # Safety
///
/// This method cannot be called from within a `&BitSlice` context; it may
/// only be called by construction of an `&Self` reference from a `Self`
/// element directly.
///
/// # Parameters
///
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<Self>) -> bool
where C: Cursor {
*self & *C::mask(place) != Self::from(0)
}
/// Counts how many bits in `self` are set to `1`.
///
/// This zero-extends `self` to `u64`, and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `1`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_ones(&0u8), 0);
/// assert_eq!(BitStore::count_ones(&128u8), 1);
/// assert_eq!(BitStore::count_ones(&192u8), 2);
/// assert_eq!(BitStore::count_ones(&224u8), 3);
/// assert_eq!(BitStore::count_ones(&240u8), 4);
/// assert_eq!(BitStore::count_ones(&248u8), 5);
/// assert_eq!(BitStore::count_ones(&252u8), 6);
/// assert_eq!(BitStore::count_ones(&254u8), 7);
/// assert_eq!(BitStore::count_ones(&255u8), 8);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_ones(&self) -> usize {
u64::count_ones((*self).into()) as usize
}
/// Counts how many bits in `self` are set to `0`.
///
/// This inverts `self`, so all `0` bits are `1` and all `1` bits are `0`,
/// then zero-extends `self` to `u64` and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `0`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_zeros(&0u8), 8);
/// assert_eq!(BitStore::count_zeros(&1u8), 7);
/// assert_eq!(BitStore::count_zeros(&3u8), 6);
/// assert_eq!(BitStore::count_zeros(&7u8), 5);
/// assert_eq!(BitStore::count_zeros(&15u8), 4);
/// assert_eq!(BitStore::count_zeros(&31u8), 3);
/// assert_eq!(BitStore::count_zeros(&63u8), 2);
/// assert_eq!(BitStore::count_zeros(&127u8), 1);
/// assert_eq!(BitStore::count_zeros(&255u8), 0);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_zeros(&self) -> usize {
// invert (0 becomes 1, 1 becomes 0), zero-extend, count ones
u64::count_ones((!*self).into()) as usize
}
/// Extends a single bit to fill the entire element.
///
/// # Parameters
///
/// - `bit`: The bit to extend.
///
/// # Returns
///
/// An element with all bits set to the input.
#[inline]
fn bits(bit: bool) -> Self {
if bit {
!Self::from(0)
}
else {
Self::from(0)
}
}
}
/** Marker trait to seal `BitStore` against downstream implementation.
This trait is public in the module, so that other modules in the crate can use
it, but so long as it is not exported by the crate root and this module is
private, this trait effectively forbids downstream implementation of the
`BitStore` trait.
**/
#[doc(hidden)]
pub trait Sealed {}
macro_rules! store {
( $( $t:ty , $a:ty $( ; )? );* ) => { $(
impl Sealed for $t {}
impl BitStore for $t {
const TYPENAME: &'static str = stringify!($t);
#[cfg(feature = "atomic")]
type Nucleus = $a;
#[cfg(not(feature = "atomic"))]
type Nucleus = Cell<Self>;
}
)* };
}
store![
u8, atomic::AtomicU8;
u16, atomic::AtomicU16;
u32, atomic::AtomicU32;
];
#[cfg(target_pointer_width = "64")]
store![u64, atomic::AtomicU64];
/// Type alias to the CPU word element, `u32`.
#[cfg(target_pointer_width = "32")]
pub type Word = u32;
/// Type alias to the CPU word element, `u64`.
#[cfg(target_pointer_width = "64")]
pub type Word = u64;
/** Common interface for atomic and cellular shared-mutability wrappers.
`&/mut BitSlice` contexts must use the `BitStore::Nucleus` type for all
reference production, and must route through this trait in order to access the
underlying memory. In multi-threaded contexts, this trait enforces that all
access is synchronized through atomic accesses; in single-threaded contexts,
this trait solely permits modification of an aliased element.
It is implemented on the atomic type wrappers when the `atomic` feature is set,
and implemented on the `Cell` type wrapper when the feature is missing. Coupled
with the `Send` implementation on `BitSlice`
**/
pub trait BitAccess<T>: Sized
where T: BitStore {
/// Sets a specific bit in an element low.
///
/// `BitAccess::set` calls this when its `value` is `false`; it
/// unconditionally writes a `0` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn clear_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Sets a specific bit in an element high.
///
/// `BitAccess::set` calls this when its `value` is `true`; it
/// unconditionally writes a `1` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn set_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Inverts a specific bit in an element.
///
/// This is the driver of `BitStore::invert_bit`, and has the same API and
/// documented behavior.
fn invert_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Gets a specific bit in an element.
///
/// # Parameters
///
/// - `&self`: A shared reference to a maybe-mutable element. This uses the
/// trait `load` function to ensure correct reads from memory.
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<T>) -> bool
where C: Cursor {
self.load() & *C::mask(place) != T::from(0)
}
/// Sets a specific bit in an element to a given value.
///
/// This is the driver of `BitStore::set`, and has the same API and
/// documented behavior.
#[inline(always)]
fn set<C>(&self, place: BitIdx<T>, value: bool)
where C: Cursor {
if value {
self.set_bit::<C>(place);
}
else {
self.clear_bit::<C>(place);
}
}
/// Removes the shared-mutability wrapper, producing a read reference to the
/// inner type.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// A read reference to the wrapped type.
///
/// # Safety
///
/// As this removes mutability, it is strictly safe.
#[inline(always)]
fn base(&self) -> &T {
unsafe { &*(self as *const Self as *const T) }
}
/// Transforms a reference of `&[T::Nucleus]` into `&mut [T]`.
///
/// # Safety
///
/// This function is undefined when the `this` slice referent has aliasing
/// pointers. It must only ever be called when the slice referent is
/// guaranteed to have no aliases, but mutability has been removed from the
/// type system at an earlier point in the call stack.
///
/// # Parameters
///
/// - `this`: A slice reference to some shared-mutability reference type.
///
/// # Returns
///
/// A mutable reference to the wrapped interior type of the `this` referent.
#[inline(always)]
unsafe fn base_slice_mut(this: &[Self]) -> &mut [T] {
&mut *(this as *const [Self] as *const [T] as *mut [T])
}
/// Performs a synchronized load on an unsynchronized reference.
///
/// Atomic implementors must ensure that the load is well synchronized, and
/// cell implementors can just read. Each implementor must be strictly gated
/// on the `atomic` feature flag.
fn load(&self) -> T;
}
/* FIXME(myrrlyn): When the `radium` crate publishes generic traits, erase the
implementations currently in use and enable the generic implementation below:
impl<T, R> BitAccess<T> for R
where T: BitStore, R: RadiumBits<T> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
}
*/
#[cfg(feature = "atomic")] fn _atom() {
impl BitAccess<u8> for atomic::AtomicU8 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u8 {
self.load(Relaxed)
}
}
impl BitAccess<u16> for atomic::AtomicU16 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
| #[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u16 {
self.load(Relaxed)
}
}
impl BitAccess<u32> for atomic::AtomicU32 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u32 {
self.load(Relaxed)
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for atomic::AtomicU64 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u64 {
self.load(Relaxed)
}
}
}
#[cfg(not(feature = "atomic"))] fn _cell() {
impl BitAccess<u8> for Cell<u8> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u8 {
self.get()
}
}
impl BitAccess<u16> for Cell<u16> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u16 {
self.get()
}
}
impl BitAccess<u32> for Cell<u32> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u32 {
self.get()
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for Cell<u64> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u64 {
self.get()
}
}
}
| self.fetch_and(!*C::mask(bit), Relaxed);
}
| identifier_body |
store.rs | /*! Bit management
The `BitStore` trait defines constants and associated functions suitable for
managing the bit patterns of a fundamental, and is the constraint for the
storage type of the data structures of the rest of the crate.
The other types in this module provide stronger rules about how indices map to
concrete bits in fundamental elements. They are implementation details, and are
not exported in the prelude.
!*/
use crate::{
cursor::Cursor,
indices::BitIdx,
};
use core::{
cmp::Eq,
fmt::{
Binary,
Debug,
Display,
LowerHex,
UpperHex,
},
mem::size_of,
ops::{
BitAnd,
BitAndAssign,
BitOrAssign,
Not,
Shl,
ShlAssign,
Shr,
ShrAssign,
},
};
#[cfg(feature = "atomic")]
use core::sync::atomic::{
self,
Ordering::Relaxed,
};
#[cfg(not(feature = "atomic"))]
use core::cell::Cell;
/** Generalizes over the fundamental types for use in `bitvec` data structures.
This trait must only be implemented on unsigned integer primitives with full
alignment. It cannot be implemented on `u128` on any architecture, or on `u64`
on 32-bit systems.
The `Sealed` supertrait ensures that this can only be implemented locally, and
will never be implemented by downstream crates on new types.
**/
pub trait BitStore:
// Forbid external implementation
Sealed
+ Binary
// Element-wise binary manipulation
+ BitAnd<Self, Output=Self>
+ BitAndAssign<Self>
+ BitOrAssign<Self>
// Permit indexing into a generic array
+ Copy
+ Debug
+ Default
+ Display
// Permit testing a value against 1 in `get()`.
+ Eq
// Rust treats numeric literals in code as vaguely typed and does not make
// them concrete until long after trait expansion, so this enables building
// a concrete Self value from a numeric literal.
+ From<u8>
// Permit extending into a `u64`.
+ Into<u64>
+ LowerHex
+ Not<Output=Self>
+ Send
+ Shl<u8, Output=Self>
+ ShlAssign<u8>
+ Shr<u8, Output=Self>
+ ShrAssign<u8>
// Allow direct access to a concrete implementor type.
+ Sized
+ Sync
+ UpperHex
{
/// The width, in bits, of this type.
const BITS: u8 = size_of::<Self>() as u8 * 8;
/// The number of bits required to index a bit inside the type. This is
/// always log<sub>2</sub> of the type’s bit width.
const INDX: u8 = Self::BITS.trailing_zeros() as u8;
/// The bitmask to turn an arbitrary number into a bit index. Bit indices
/// are always stored in the lowest bits of an index value.
const MASK: u8 = Self::BITS - 1;
/// Name of the implementing type. This is only necessary until the compiler
/// stabilizes `type_name()`.
const TYPENAME: &'static str;
/// Shared-mutability wrapper type used to safely mutate aliased data.
///
/// Within `&/mut BitSlice` contexts, the `Nucleus` type **must** be used to
/// ensure correctly-synchronized access to memory elements that may have
/// aliased mutable access. When a codepath knows that it has full ownership
/// of a memory element of `Self`, and no other codepath may observe, much
/// less modify, it, then that codepath may skip the `Nucleus` type and use
/// plain accessors.
type Nucleus: BitAccess<Self>;
/// Sets a specific bit in an element to a given value.
///
/// # Safety
///
/// This method cannot be called from within a `&mut BitSlice` context; it
/// may only be called by construction of an `&mut Self` reference from a
/// `Self` element directly.
///
/// # Parameters
///
/// - `&mut self`
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be set according to `value`.
/// - `value`: A Boolean value, which sets the bit on `true` and clears it
/// on `false`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
#[inline(always)]
fn set<C>(&mut self, place: BitIdx<Self>, value: bool)
where C: Cursor {
let mask = *C::mask(place);
if value {
*self |= mask;
}
else {
*self &= !mask;
}
}
/// Gets a specific bit in an element.
///
/// # Safety
///
/// This method cannot be called from within a `&BitSlice` context; it may
/// only be called by construction of an `&Self` reference from a `Self`
/// element directly.
///
/// # Parameters
///
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<Self>) -> bool
where C: Cursor {
*self & *C::mask(place) != Self::from(0)
}
/// Counts how many bits in `self` are set to `1`.
///
/// This zero-extends `self` to `u64`, and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `1`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_ones(&0u8), 0);
/// assert_eq!(BitStore::count_ones(&128u8), 1);
/// assert_eq!(BitStore::count_ones(&192u8), 2);
/// assert_eq!(BitStore::count_ones(&224u8), 3);
/// assert_eq!(BitStore::count_ones(&240u8), 4);
/// assert_eq!(BitStore::count_ones(&248u8), 5);
/// assert_eq!(BitStore::count_ones(&252u8), 6);
/// assert_eq!(BitStore::count_ones(&254u8), 7);
/// assert_eq!(BitStore::count_ones(&255u8), 8);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_ones(&self) -> usize {
u64::count_ones((*self).into()) as usize
}
/// Counts how many bits in `self` are set to `0`.
///
/// This inverts `self`, so all `0` bits are `1` and all `1` bits are `0`,
/// then zero-extends `self` to `u64` and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `0`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_zeros(&0u8), 8);
/// assert_eq!(BitStore::count_zeros(&1u8), 7);
/// assert_eq!(BitStore::count_zeros(&3u8), 6);
/// assert_eq!(BitStore::count_zeros(&7u8), 5);
/// assert_eq!(BitStore::count_zeros(&15u8), 4);
/// assert_eq!(BitStore::count_zeros(&31u8), 3);
/// assert_eq!(BitStore::count_zeros(&63u8), 2);
/// assert_eq!(BitStore::count_zeros(&127u8), 1);
/// assert_eq!(BitStore::count_zeros(&255u8), 0);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_zeros(&self) -> usize {
// invert (0 becomes 1, 1 becomes 0), zero-extend, count ones
u64::count_ones((!*self).into()) as usize
}
/// Extends a single bit to fill the entire element.
///
/// # Parameters
///
/// - `bit`: The bit to extend.
///
/// # Returns
///
/// An element with all bits set to the input.
#[inline]
fn bits(bit: bool) -> Self {
if bit {
!Self::from(0)
}
else {
Self::from(0)
}
}
}
/** Marker trait to seal `BitStore` against downstream implementation.
This trait is public in the module, so that other modules in the crate can use
it, but so long as it is not exported by the crate root and this module is
private, this trait effectively forbids downstream implementation of the
`BitStore` trait.
**/
#[doc(hidden)]
pub trait Sealed {}
macro_rules! store {
( $( $t:ty , $a:ty $( ; )? );* ) => { $(
impl Sealed for $t {}
impl BitStore for $t {
const TYPENAME: &'static str = stringify!($t);
#[cfg(feature = "atomic")]
type Nucleus = $a;
#[cfg(not(feature = "atomic"))]
type Nucleus = Cell<Self>;
}
)* };
}
store![
u8, atomic::AtomicU8;
u16, atomic::AtomicU16;
u32, atomic::AtomicU32;
];
#[cfg(target_pointer_width = "64")]
store![u64, atomic::AtomicU64];
/// Type alias to the CPU word element, `u32`.
#[cfg(target_pointer_width = "32")]
pub type Word = u32;
/// Type alias to the CPU word element, `u64`.
#[cfg(target_pointer_width = "64")]
pub type Word = u64;
/** Common interface for atomic and cellular shared-mutability wrappers.
`&/mut BitSlice` contexts must use the `BitStore::Nucleus` type for all
reference production, and must route through this trait in order to access the
underlying memory. In multi-threaded contexts, this trait enforces that all
access is synchronized through atomic accesses; in single-threaded contexts,
this trait solely permits modification of an aliased element.
It is implemented on the atomic type wrappers when the `atomic` feature is set,
and implemented on the `Cell` type wrapper when the feature is missing. Coupled
with the `Send` implementation on `BitSlice`
**/
pub trait BitAccess<T>: Sized
where T: BitStore {
/// Sets a specific bit in an element low.
///
/// `BitAccess::set` calls this when its `value` is `false`; it
/// unconditionally writes a `0` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn clear_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Sets a specific bit in an element high.
///
/// `BitAccess::set` calls this when its `value` is `true`; it
/// unconditionally writes a `1` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn set_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Inverts a specific bit in an element.
///
/// This is the driver of `BitStore::invert_bit`, and has the same API and
/// documented behavior.
fn invert_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Gets a specific bit in an element.
///
/// # Parameters
///
/// - `&self`: A shared reference to a maybe-mutable element. This uses the
/// trait `load` function to ensure correct reads from memory.
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<T>) -> bool
where C: Cursor {
self.load() & *C::mask(place) != T::from(0)
}
/// Sets a specific bit in an element to a given value.
///
/// This is the driver of `BitStore::set`, and has the same API and
/// documented behavior.
#[inline(always)]
fn set<C>(&self, place: BitIdx<T>, value: bool)
where C: Cursor {
if value {
self.set_bit::<C>(place);
}
else {
self.clear_bit::<C>(place);
}
}
/// Removes the shared-mutability wrapper, producing a read reference to the
/// inner type.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// A read reference to the wrapped type.
///
/// # Safety
///
/// As this removes mutability, it is strictly safe.
#[inline(always)]
fn base(&self) -> &T {
unsafe { &*(self as *const Self as *const T) }
}
/// Transforms a reference of `&[T::Nucleus]` into `&mut [T]`.
///
/// # Safety
///
/// This function is undefined when the `this` slice referent has aliasing
/// pointers. It must only ever be called when the slice referent is
/// guaranteed to have no aliases, but mutability has been removed from the
/// type system at an earlier point in the call stack.
///
/// # Parameters
///
/// - `this`: A slice reference to some shared-mutability reference type.
///
/// # Returns
///
/// A mutable reference to the wrapped interior type of the `this` referent.
#[inline(always)]
unsafe fn base_slice_mut(this: &[Self]) -> &mut [T] {
&mut *(this as *const [Self] as *const [T] as *mut [T])
}
/// Performs a synchronized load on an unsynchronized reference.
///
/// Atomic implementors must ensure that the load is well synchronized, and
/// cell implementors can just read. Each implementor must be strictly gated
/// on the `atomic` feature flag.
fn load(&self) -> T;
}
/* FIXME(myrrlyn): When the `radium` crate publishes generic traits, erase the
implementations currently in use and enable the generic implementation below:
impl<T, R> BitAccess<T> for R
where T: BitStore, R: RadiumBits<T> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
}
*/
#[cfg(feature = "atomic")] fn _atom() {
impl BitAccess<u8> for atomic::AtomicU8 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u8 {
self.load(Relaxed)
}
}
impl BitAccess<u16> for atomic::AtomicU16 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u16 {
self.load(Relaxed)
}
}
impl BitAccess<u32> for atomic::AtomicU32 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u32 {
self.load(Relaxed)
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for atomic::AtomicU64 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn lo | self) -> u64 {
self.load(Relaxed)
}
}
}
#[cfg(not(feature = "atomic"))] fn _cell() {
impl BitAccess<u8> for Cell<u8> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u8 {
self.get()
}
}
impl BitAccess<u16> for Cell<u16> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u16 {
self.get()
}
}
impl BitAccess<u32> for Cell<u32> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u32 {
self.get()
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for Cell<u64> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u64 {
self.get()
}
}
}
| ad(& | identifier_name |
store.rs | /*! Bit management
The `BitStore` trait defines constants and associated functions suitable for
managing the bit patterns of a fundamental, and is the constraint for the
storage type of the data structures of the rest of the crate.
The other types in this module provide stronger rules about how indices map to
concrete bits in fundamental elements. They are implementation details, and are
not exported in the prelude.
!*/
use crate::{
cursor::Cursor,
indices::BitIdx,
};
use core::{
cmp::Eq,
fmt::{
Binary,
Debug,
Display,
LowerHex,
UpperHex,
},
mem::size_of,
ops::{
BitAnd,
BitAndAssign,
BitOrAssign,
Not,
Shl,
ShlAssign,
Shr,
ShrAssign,
},
};
#[cfg(feature = "atomic")]
use core::sync::atomic::{
self,
Ordering::Relaxed,
};
#[cfg(not(feature = "atomic"))]
use core::cell::Cell;
/** Generalizes over the fundamental types for use in `bitvec` data structures.
This trait must only be implemented on unsigned integer primitives with full
alignment. It cannot be implemented on `u128` on any architecture, or on `u64`
on 32-bit systems.
The `Sealed` supertrait ensures that this can only be implemented locally, and
will never be implemented by downstream crates on new types.
**/
pub trait BitStore:
// Forbid external implementation
Sealed
+ Binary
// Element-wise binary manipulation
+ BitAnd<Self, Output=Self>
+ BitAndAssign<Self>
+ BitOrAssign<Self>
// Permit indexing into a generic array
+ Copy
+ Debug
+ Default
+ Display
// Permit testing a value against 1 in `get()`.
+ Eq
// Rust treats numeric literals in code as vaguely typed and does not make
// them concrete until long after trait expansion, so this enables building
// a concrete Self value from a numeric literal.
+ From<u8>
// Permit extending into a `u64`.
+ Into<u64>
+ LowerHex
+ Not<Output=Self>
+ Send
+ Shl<u8, Output=Self>
+ ShlAssign<u8>
+ Shr<u8, Output=Self>
+ ShrAssign<u8>
// Allow direct access to a concrete implementor type.
+ Sized
+ Sync
+ UpperHex
{
/// The width, in bits, of this type.
const BITS: u8 = size_of::<Self>() as u8 * 8;
/// The number of bits required to index a bit inside the type. This is
/// always log<sub>2</sub> of the type’s bit width.
const INDX: u8 = Self::BITS.trailing_zeros() as u8;
/// The bitmask to turn an arbitrary number into a bit index. Bit indices
/// are always stored in the lowest bits of an index value.
const MASK: u8 = Self::BITS - 1;
/// Name of the implementing type. This is only necessary until the compiler
/// stabilizes `type_name()`.
const TYPENAME: &'static str;
/// Shared-mutability wrapper type used to safely mutate aliased data.
///
/// Within `&/mut BitSlice` contexts, the `Nucleus` type **must** be used to
/// ensure correctly-synchronized access to memory elements that may have
/// aliased mutable access. When a codepath knows that it has full ownership
/// of a memory element of `Self`, and no other codepath may observe, much
/// less modify, it, then that codepath may skip the `Nucleus` type and use
/// plain accessors.
type Nucleus: BitAccess<Self>;
/// Sets a specific bit in an element to a given value.
///
/// # Safety
///
/// This method cannot be called from within a `&mut BitSlice` context; it
/// may only be called by construction of an `&mut Self` reference from a
/// `Self` element directly.
///
/// # Parameters
///
/// - `&mut self`
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be set according to `value`.
/// - `value`: A Boolean value, which sets the bit on `true` and clears it
/// on `false`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
#[inline(always)]
fn set<C>(&mut self, place: BitIdx<Self>, value: bool)
where C: Cursor {
let mask = *C::mask(place);
if value {
| else {
*self &= !mask;
}
}
/// Gets a specific bit in an element.
///
/// # Safety
///
/// This method cannot be called from within a `&BitSlice` context; it may
/// only be called by construction of an `&Self` reference from a `Self`
/// element directly.
///
/// # Parameters
///
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<Self>) -> bool
where C: Cursor {
*self & *C::mask(place) != Self::from(0)
}
/// Counts how many bits in `self` are set to `1`.
///
/// This zero-extends `self` to `u64`, and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `1`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_ones(&0u8), 0);
/// assert_eq!(BitStore::count_ones(&128u8), 1);
/// assert_eq!(BitStore::count_ones(&192u8), 2);
/// assert_eq!(BitStore::count_ones(&224u8), 3);
/// assert_eq!(BitStore::count_ones(&240u8), 4);
/// assert_eq!(BitStore::count_ones(&248u8), 5);
/// assert_eq!(BitStore::count_ones(&252u8), 6);
/// assert_eq!(BitStore::count_ones(&254u8), 7);
/// assert_eq!(BitStore::count_ones(&255u8), 8);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_ones(&self) -> usize {
u64::count_ones((*self).into()) as usize
}
/// Counts how many bits in `self` are set to `0`.
///
/// This inverts `self`, so all `0` bits are `1` and all `1` bits are `0`,
/// then zero-extends `self` to `u64` and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `0`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_zeros(&0u8), 8);
/// assert_eq!(BitStore::count_zeros(&1u8), 7);
/// assert_eq!(BitStore::count_zeros(&3u8), 6);
/// assert_eq!(BitStore::count_zeros(&7u8), 5);
/// assert_eq!(BitStore::count_zeros(&15u8), 4);
/// assert_eq!(BitStore::count_zeros(&31u8), 3);
/// assert_eq!(BitStore::count_zeros(&63u8), 2);
/// assert_eq!(BitStore::count_zeros(&127u8), 1);
/// assert_eq!(BitStore::count_zeros(&255u8), 0);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_zeros(&self) -> usize {
// invert (0 becomes 1, 1 becomes 0), zero-extend, count ones
u64::count_ones((!*self).into()) as usize
}
/// Extends a single bit to fill the entire element.
///
/// # Parameters
///
/// - `bit`: The bit to extend.
///
/// # Returns
///
/// An element with all bits set to the input.
#[inline]
fn bits(bit: bool) -> Self {
if bit {
!Self::from(0)
}
else {
Self::from(0)
}
}
}
/** Marker trait to seal `BitStore` against downstream implementation.
This trait is public in the module, so that other modules in the crate can use
it, but so long as it is not exported by the crate root and this module is
private, this trait effectively forbids downstream implementation of the
`BitStore` trait.
**/
#[doc(hidden)]
pub trait Sealed {}
macro_rules! store {
( $( $t:ty , $a:ty $( ; )? );* ) => { $(
impl Sealed for $t {}
impl BitStore for $t {
const TYPENAME: &'static str = stringify!($t);
#[cfg(feature = "atomic")]
type Nucleus = $a;
#[cfg(not(feature = "atomic"))]
type Nucleus = Cell<Self>;
}
)* };
}
store![
u8, atomic::AtomicU8;
u16, atomic::AtomicU16;
u32, atomic::AtomicU32;
];
#[cfg(target_pointer_width = "64")]
store![u64, atomic::AtomicU64];
/// Type alias to the CPU word element, `u32`.
#[cfg(target_pointer_width = "32")]
pub type Word = u32;
/// Type alias to the CPU word element, `u64`.
#[cfg(target_pointer_width = "64")]
pub type Word = u64;
/** Common interface for atomic and cellular shared-mutability wrappers.
`&/mut BitSlice` contexts must use the `BitStore::Nucleus` type for all
reference production, and must route through this trait in order to access the
underlying memory. In multi-threaded contexts, this trait enforces that all
access is synchronized through atomic accesses; in single-threaded contexts,
this trait solely permits modification of an aliased element.
It is implemented on the atomic type wrappers when the `atomic` feature is set,
and implemented on the `Cell` type wrapper when the feature is missing. Coupled
with the `Send` implementation on `BitSlice`
**/
pub trait BitAccess<T>: Sized
where T: BitStore {
/// Sets a specific bit in an element low.
///
/// `BitAccess::set` calls this when its `value` is `false`; it
/// unconditionally writes a `0` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn clear_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Sets a specific bit in an element high.
///
/// `BitAccess::set` calls this when its `value` is `true`; it
/// unconditionally writes a `1` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn set_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Inverts a specific bit in an element.
///
/// This is the driver of `BitStore::invert_bit`, and has the same API and
/// documented behavior.
fn invert_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Gets a specific bit in an element.
///
/// # Parameters
///
/// - `&self`: A shared reference to a maybe-mutable element. This uses the
/// trait `load` function to ensure correct reads from memory.
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<T>) -> bool
where C: Cursor {
self.load() & *C::mask(place) != T::from(0)
}
/// Sets a specific bit in an element to a given value.
///
/// This is the driver of `BitStore::set`, and has the same API and
/// documented behavior.
#[inline(always)]
fn set<C>(&self, place: BitIdx<T>, value: bool)
where C: Cursor {
if value {
self.set_bit::<C>(place);
}
else {
self.clear_bit::<C>(place);
}
}
/// Removes the shared-mutability wrapper, producing a read reference to the
/// inner type.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// A read reference to the wrapped type.
///
/// # Safety
///
/// As this removes mutability, it is strictly safe.
#[inline(always)]
fn base(&self) -> &T {
unsafe { &*(self as *const Self as *const T) }
}
/// Transforms a reference of `&[T::Nucleus]` into `&mut [T]`.
///
/// # Safety
///
/// This function is undefined when the `this` slice referent has aliasing
/// pointers. It must only ever be called when the slice referent is
/// guaranteed to have no aliases, but mutability has been removed from the
/// type system at an earlier point in the call stack.
///
/// # Parameters
///
/// - `this`: A slice reference to some shared-mutability reference type.
///
/// # Returns
///
/// A mutable reference to the wrapped interior type of the `this` referent.
#[inline(always)]
unsafe fn base_slice_mut(this: &[Self]) -> &mut [T] {
&mut *(this as *const [Self] as *const [T] as *mut [T])
}
/// Performs a synchronized load on an unsynchronized reference.
///
/// Atomic implementors must ensure that the load is well synchronized, and
/// cell implementors can just read. Each implementor must be strictly gated
/// on the `atomic` feature flag.
fn load(&self) -> T;
}
/* FIXME(myrrlyn): When the `radium` crate publishes generic traits, erase the
implementations currently in use and enable the generic implementation below:
impl<T, R> BitAccess<T> for R
where T: BitStore, R: RadiumBits<T> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
}
*/
#[cfg(feature = "atomic")] fn _atom() {
impl BitAccess<u8> for atomic::AtomicU8 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u8 {
self.load(Relaxed)
}
}
impl BitAccess<u16> for atomic::AtomicU16 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u16 {
self.load(Relaxed)
}
}
impl BitAccess<u32> for atomic::AtomicU32 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u32 {
self.load(Relaxed)
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for atomic::AtomicU64 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u64 {
self.load(Relaxed)
}
}
}
#[cfg(not(feature = "atomic"))] fn _cell() {
impl BitAccess<u8> for Cell<u8> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u8 {
self.get()
}
}
impl BitAccess<u16> for Cell<u16> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u16 {
self.get()
}
}
impl BitAccess<u32> for Cell<u32> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u32 {
self.get()
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for Cell<u64> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() & !*C::mask(bit));
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() | *C::mask(bit));
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.set(self.get() ^ *C::mask(bit));
}
#[inline(always)]
fn load(&self) -> u64 {
self.get()
}
}
}
| *self |= mask;
}
| conditional_block |
game.go | package main
import (
"log"
"math/rand"
"time"
"github.com/anaseto/gruid"
"github.com/anaseto/gruid/paths"
"github.com/anaseto/gruid/rl"
)
var Version string = "v0.5.0"
// game contains the game logic's state, without ui stuff. Everything could be
// in the model struct instead, with only the game logic's fiend exported, as
// some game functions need the model anyway (like animations), but this allows
// to differentiate a bit things that are mainly game-logic from the stuff that
// is more about ui.
type game struct {
Dungeon *dungeon
Player *player
Monsters []*monster
MonstersPosCache []int // monster (dungeon index + 1) / no monster (0)
Bands []bandInfo
Events *rl.EventQueue
EventIndex int
Depth int
ExploredLevels int
DepthPlayerTurn int
Turn int
Highlight map[gruid.Point]bool // highlighted positions (e.g. targeted ray)
Objects objects
Clouds map[gruid.Point]cloud
MagicalBarriers map[gruid.Point]cell
GeneratedLore map[int]bool
GeneratedMagaras []magaraKind
GeneratedCloaks []item
GeneratedAmulets []item
GenPlan [MaxDepth + 1]genFlavour
TerrainKnowledge map[gruid.Point]cell
ExclusionsMap map[gruid.Point]bool
Noise map[gruid.Point]bool
NoiseIllusion map[gruid.Point]bool
LastMonsterKnownAt map[gruid.Point]int
MonsterLOS map[gruid.Point]bool
MonsterTargLOS map[gruid.Point]bool
LightFOV *rl.FOV
RaysCache rayMap
Resting bool
RestingTurns int
Autoexploring bool
AutoexploreMapRebuild bool
AutoTarget gruid.Point
AutoDir gruid.Point
autoDirNeighbors dirNeighbors
autoDirChanged bool
AutoHalt bool
Log []logEntry
LogIndex int
LogNextTick int
InfoEntry string
Stats stats
Wizard bool
WizardMode wizardMode
Version string
Places places
Params startParams
//Opts startOpts
md *model // needed for animations and a few more cases
LiberatedShaedra bool
LiberatedArtifact bool
PlayerAgain bool
mfov *rl.FOV
PR *paths.PathRange
PRauto *paths.PathRange
autosources []gruid.Point // cache
nbs paths.Neighbors
rand *rand.Rand
}
type specialEvent int
const (
NormalLevel specialEvent = iota
UnstableLevel
EarthquakeLevel
MistLevel
)
const spEvMax = int(MistLevel)
type startParams struct {
Lore map[int]bool
Blocked map[int]bool
Special []specialRoom
Event map[int]specialEvent
Windows map[int]bool
Trees map[int]bool
Holes map[int]bool
Stones map[int]bool
Tables map[int]bool
NoMagara map[int]bool
FakeStair map[int]bool
ExtraBanana map[int]int
HealthPotion map[int]bool
MappingStone map[int]bool
CrazyImp int
}
type wizardMode int
const (
WizardNormal wizardMode = iota
WizardMap
WizardSeeAll
)
func (g *game) FreePassableCell() gruid.Point {
d := g.Dungeon
count := 0
for {
count++
if count > maxIterations {
panic("FreePassableCell")
}
x := RandInt(DungeonWidth)
y := RandInt(DungeonHeight)
p := gruid.Point{x, y}
c := d.Cell(p)
if !c.IsPassable() {
continue
}
if g.Player != nil && g.Player.P == p {
continue
}
mons := g.MonsterAt(p)
if mons.Exists() {
continue
}
return p
}
}
const MaxDepth = 11
const WinDepth = 8
const (
DungeonHeight = 21
DungeonWidth = 80
DungeonNCells = DungeonWidth * DungeonHeight
)
func (g *game) GenDungeon() {
ml := AutomataCave
switch g.Depth {
case 2, 6, 7:
ml = RandomWalkCave
if RandInt(3) == 0 {
ml = NaturalCave
}
case 4, 10, 11:
ml = RandomWalkTreeCave
if RandInt(4) == 0 && g.Depth < 11 {
ml = RandomSmallWalkCaveUrbanised
} else if g.Depth == 11 && RandInt(2) == 0 {
ml = RandomSmallWalkCaveUrbanised
}
case 9:
switch RandInt(4) {
case 0:
ml = NaturalCave
case 1:
ml = RandomWalkCave
}
default:
if RandInt(10) == 0 {
ml = RandomSmallWalkCaveUrbanised
} else if RandInt(10) == 0 {
ml = NaturalCave
}
}
g.GenRoomTunnels(ml)
}
func (g *game) InitPlayer() {
g.Player = &player{
HP: DefaultHealth,
MP: DefaultMPmax,
Bananas: 1,
}
g.Player.LOS = map[gruid.Point]bool{}
g.Player.Statuses = map[status]int{}
g.Player.Expire = map[status]int{}
g.Player.Magaras = []magara{
{},
{},
{},
{},
}
g.GeneratedMagaras = []magaraKind{}
g.Player.Magaras[0] = g.RandomStartingMagara()
g.GeneratedMagaras = append(g.GeneratedMagaras, g.Player.Magaras[0].Kind)
g.Player.Inventory.Misc = MarevorMagara
g.Player.FOV = rl.NewFOV(visionRange(g.Player.P, TreeRange))
// Testing
//g.Player.Magaras[1] = magara{Kind: DispersalMagara, Charges: 10}
//g.Player.Magaras[2] = magara{Kind: DelayedOricExplosionMagara, Charges: 10}
//g.Player.Magaras[2] = ConfusionMagara
}
type genFlavour int
const (
GenNothing genFlavour = iota
//GenWeapon
GenAmulet
GenCloak
)
func PutRandomLevels(m map[int]bool, n int) {
for i := 0; i < n; i++ {
j := 1 + RandInt(MaxDepth)
if !m[j] {
m[j] = true
} else {
i--
}
}
}
func (g *game) InitFirstLevel() {
g.Version = Version
g.Depth++ // start at 1
g.InitPlayer()
g.AutoTarget = invalidPos
g.RaysCache = rayMap{}
g.GeneratedLore = map[int]bool{}
g.Stats.KilledMons = map[monsterKind]int{}
g.Stats.UsedMagaras = map[magaraKind]int{}
g.Stats.Achievements = map[achievement]int{}
g.Stats.Lore = map[int]bool{}
g.Stats.Statuses = map[status]int{}
g.GenPlan = [MaxDepth + 1]genFlavour{
1: GenNothing,
2: GenCloak,
3: GenNothing,
4: GenAmulet,
5: GenNothing,
6: GenCloak,
7: GenNothing,
8: GenAmulet,
9: GenNothing,
10: GenCloak,
11: GenNothing,
}
g.Params.Lore = map[int]bool{}
PutRandomLevels(g.Params.Lore, 8)
g.Params.HealthPotion = map[int]bool{}
PutRandomLevels(g.Params.HealthPotion, 5)
g.Params.MappingStone = map[int]bool{}
PutRandomLevels(g.Params.MappingStone, 3)
g.Params.Blocked = map[int]bool{}
if RandInt(10) > 0 {
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
if RandInt(10) == 0 {
// a second one sometimes!
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
g.Params.Special = []specialRoom{
noSpecialRoom, // unused (depth 0)
noSpecialRoom,
noSpecialRoom,
roomMilfids,
roomCelmists,
roomVampires,
roomHarpies,
roomTreeMushrooms,
roomShaedra,
roomCelmists,
roomMirrorSpecters,
roomArtifact,
}
if RandInt(2) == 0 {
g.Params.Special[5] = roomNixes
}
if RandInt(4) == 0 {
if g.Params.Special[5] == roomNixes {
g.Params.Special[9] = roomVampires
} else {
g.Params.Special[9] = roomNixes
}
}
if RandInt(4) == 0 {
if RandInt(2) == 0 {
g.Params.Special[3] = roomFrogs
} else {
g.Params.Special[7] = roomFrogs
}
}
if RandInt(4) == 0 {
g.Params.Special[10], g.Params.Special[5] = g.Params.Special[5], g.Params.Special[10]
}
if RandInt(4) == 0 {
g.Params.Special[6], g.Params.Special[7] = g.Params.Special[7], g.Params.Special[6]
}
if RandInt(4) == 0 {
g.Params.Special[3], g.Params.Special[4] = g.Params.Special[4], g.Params.Special[3]
}
g.Params.Event = map[int]specialEvent{}
for i := 0; i < 2; i++ {
g.Params.Event[2+5*i+RandInt(5)] = specialEvent(1 + RandInt(spEvMax))
}
g.Params.Event[2+RandInt(MaxDepth-1)] = NormalLevel
g.Params.FakeStair = map[int]bool{}
if RandInt(MaxDepth) > 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) == 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
}
}
}
g.Params.ExtraBanana = map[int]int{}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]++
}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]--
}
g.Params.Windows = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Holes = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Trees = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Tables = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.NoMagara = map[int]bool{}
g.Params.NoMagara[WinDepth] = true
g.Params.Stones = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
}
}
permi := RandInt(WinDepth - 1)
switch permi {
case 0, 1, 2, 3:
g.GenPlan[permi+1], g.GenPlan[permi+2] = g.GenPlan[permi+2], g.GenPlan[permi+1]
}
if RandInt(4) == 0 {
g.GenPlan[6], g.GenPlan[7] = g.GenPlan[7], g.GenPlan[6]
}
if RandInt(4) == 0 {
g.GenPlan[MaxDepth-1], g.GenPlan[MaxDepth] = g.GenPlan[MaxDepth], g.GenPlan[MaxDepth-1]
}
g.Params.CrazyImp = 2 + RandInt(MaxDepth-2)
g.PR = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
g.PRauto = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
}
func (g *game) InitLevelStructures() {
g.MonstersPosCache = make([]int, DungeonNCells)
g.Noise = map[gruid.Point]bool{}
g.TerrainKnowledge = map[gruid.Point]cell{}
g.ExclusionsMap = map[gruid.Point]bool{}
g.MagicalBarriers = map[gruid.Point]cell{}
g.LastMonsterKnownAt = map[gruid.Point]int{}
g.Objects.Magaras = map[gruid.Point]magara{}
g.Objects.Lore = map[gruid.Point]int{}
g.Objects.Items = map[gruid.Point]item{}
g.Objects.Scrolls = map[gruid.Point]scroll{}
g.Objects.Stairs = map[gruid.Point]stair{}
g.Objects.Bananas = make(map[gruid.Point]bool, 2)
g.Objects.Barrels = map[gruid.Point]bool{}
g.Objects.Lights = map[gruid.Point]bool{}
g.Objects.FakeStairs = map[gruid.Point]bool{}
g.Objects.Potions = map[gruid.Point]potion{}
g.NoiseIllusion = map[gruid.Point]bool{}
g.Clouds = map[gruid.Point]cloud{}
g.MonsterLOS = map[gruid.Point]bool{}
g.Stats.AtNotablePos = map[gruid.Point]bool{}
}
var Testing = false
func (g *game) InitLevel() {
if g.rand == nil {
g.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Starting data
if g.Depth == 0 {
g.InitFirstLevel()
}
g.InitLevelStructures()
// Dungeon terrain
g.GenDungeon()
// Events
if g.Depth == 1 {
g.StoryPrintf("Started with %s", g.Player.Magaras[0])
g.Events = rl.NewEventQueue()
//g.PushEvent(&simpleEvent{ERank: 0, EAction: PlayerTurn})
} else {
g.CleanEvents()
for st := range g.Player.Statuses {
if st.Clean() {
g.Player.Statuses[st] = 0
}
}
}
monsters := make([]*monster, len(g.Monsters))
copy(monsters, g.Monsters)
rand.Shuffle(len(monsters), func(i, j int) {
monsters[i], monsters[j] = monsters[j], monsters[i]
})
for _, m := range monsters {
g.PushEvent(&monsterTurnEvent{Index: m.Index}, g.Turn)
}
switch g.Params.Event[g.Depth] {
case UnstableLevel:
g.PrintStyled("Uncontrolled oric magic fills the air on this level.", logSpecial)
g.StoryPrint("Special event: magically unstable level")
for i := 0; i < 7; i++ {
g.PushEvent(&posEvent{Action: ObstructionProgression},
g.Turn+DurationObstructionProgression+RandInt(DurationObstructionProgression/2))
}
case MistLevel:
g.PrintStyled("The air seems dense on this level.", logSpecial)
g.StoryPrint("Special event: mist level")
for i := 0; i < 20; i++ {
g.PushEvent(&posEvent{Action: MistProgression},
g.Turn+DurationMistProgression+RandInt(DurationMistProgression/2))
}
case EarthquakeLevel:
g.PushEvent(&posEvent{P: gruid.Point{DungeonWidth/2 - 15 + RandInt(30), DungeonHeight/2 - 5 + RandInt(10)}, Action: Earthquake},
g.Turn+10+RandInt(50))
}
// initialize LOS
if g.Depth == 1 {
g.PrintStyled("► Press ? for help on keys or use the mouse and [buttons].", logSpecial)
}
if g.Depth == WinDepth {
g.PrintStyled("Finally! Shaedra should be imprisoned somewhere around here.", logSpecial)
} else if g.Depth == MaxDepth {
g.PrintStyled("This the bottom floor, you now have to look for the artifact.", logSpecial)
}
g.ComputeLOS()
g.MakeMonstersAware()
g.ComputeMonsterLOS()
if !Testing { // disable when testing
g.md.updateStatusInfo()
}
}
func (g *game) CleanEvents() {
g.Events.Filter(func(ev rl.Event) bool {
switch ev.(type) {
case *monsterTurnEvent, *posEvent, *monsterStatusEvent, *playerEvent:
return false
default:
// keep player statuses events
return true
}
})
// finish current turn's other effects (like status progression)
turn := g.Turn
for !g.Events.Empty() {
ev, r := g.Events.PopR()
if r == turn {
e, ok := ev.(event)
if ok {
e.Handle(g)
}
continue
}
g.Events.PushFirst(ev, r)
break
}
g.Turn++
}
func (g *game) StairsSlice() []gruid.Point {
stairs := []gruid.Point{}
it := g.Dungeon.Grid.Iterator()
for it.Next() {
c := cell(it.Cell())
if (terrain(c) != StairCell && terrain(c) != FakeStairCell) || !explored(c) {
continue
}
stairs = append(stairs, it.P())
}
return stairs
}
type descendstyle int
const (
DescendNormal descendstyle = iota
DescendJump
DescendFall
)
func (g *game) Descend(style descendstyle) bool {
g.LevelStats()
if g.Stats.DUSpotted[g.Depth] < 3 {
AchStealthNovice.Get(g)
}
if g.Depth >= 3 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 {
AchInsomniaNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 {
AchInsomniaInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 && g.Stats.DRests[g.Depth-4] == 0 && g.Stats.DRests[g.Depth-5] == 0 {
AchInsomniaMaster.Get(g)
}
}
if g.Depth >= 3 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 {
AchAntimagicNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 {
AchAntimagicInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 && g.Stats.DMagaraUses[g.Depth-4] == 0 && g.Stats.DMagaraUses[g.Depth-5] == 0 {
AchAntimagicMaster.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 {
AchStealthInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DUSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 &&
g.Stats.DSpotted[g.Depth-3] < 3 {
AchStealthMaster.Get(g)
}
}
c := g.Dungeon.Cell(g.Player.P)
if terrain(c) == StairCell && g.Objects.Stairs[g.Player.P] == WinStair {
g.StoryPrint("Escaped!")
g.ExploredLevels = g.Depth
g.Depth = -1
return true
}
if style != DescendNormal {
g.md.AbyssFallAnimation()
g.PrintStyled("You fall into the abyss. It hurts!", logDamage)
g.StoryPrint("Fell into the abyss")
} else {
g.Print("You descend deeper in the dungeon.")
g.StoryPrint("Descended stairs")
}
g.Depth++
g.DepthPlayerTurn = 0
g.InitLevel()
g.Save()
return false
}
func (g *game) EnterWizardMode() {
g.Wizard = true
g.PrintStyled("Wizard mode activated: winner status disabled.", logSpecial)
g.StoryPrint("Entered wizard mode.")
}
func (g *game) ApplyRest() {
g.Player.HP = g.Player.HPMax()
g.Player.HPbonus = 0
g.Player.MP = g.Player.MPMax()
g.Stats.Rest++
g.Stats.DRests[g.Depth]++
g.PrintStyled("You feel fresh again after eating banana and sleeping.", logStatusEnd)
g.StoryPrintf("Rested in barrel (bananas: %d)", g.Player.Bananas)
if g.Stats.Rest == 10 {
AchSleepy.Get(g)
}
}
func (g *game) AutoPlayer() bool {
switch {
case g.Resting:
const enoughRestTurns = 25
if g.RestingTurns < enoughRestTurns {
g.RestingTurns++
return true
}
if g.RestingTurns >= enoughRestTurns {
g.ApplyRest()
}
g.Resting = false
case g.Autoexploring:
switch {
case g.AutoHalt:
// stop exploring
default:
var n *gruid.Point
var finished bool
if g.AutoexploreMapRebuild {
if g.AllExplored() {
g.Print("You finished exploring.")
break
}
sources := g.AutoexploreSources()
g.BuildAutoexploreMap(sources)
}
n, finished = g.NextAuto()
if finished {
n = nil
}
if finished && g.AllExplored() {
g.Print("You finished exploring.")
} else if n == nil {
g.Print("You could not safely reach some places.")
}
if n != nil {
again, err := g.PlayerBump(*n)
if err != nil {
g.Print(err.Error())
break
}
return !again
}
}
g.Autoexploring = false
case valid(g.AutoTarget):
if g.MoveToTarget() {
return true
}
g.AutoTarget = invalidPos
case g.AutoDir != ZP:
if g.AutoToDir() {
return true
}
g.AutoDir = ZP
}
return false
}
func (g *game) Died() bool {
| type msgAuto int
func (g *game) EndTurn() {
g.Events.Push(endTurnAction, g.Turn+DurationTurn)
for {
if g.Died() {
return
}
if g.Events.Empty() {
return
}
ev, r := g.Events.PopR()
g.Turn = r
switch ev := ev.(type) {
case endTurnEvent:
return
case event:
ev.Handle(g)
default:
log.Printf("bad event: %v", ev)
}
}
}
func (g *game) checks() {
if !Testing {
return
}
for _, m := range g.Monsters {
mons := g.MonsterAt(m.P)
if !mons.Exists() && m.Exists() {
log.Printf("does not exist")
continue
}
if mons != m {
log.Printf("bad monster: %v vs %v", mons.Index, m.Index)
}
}
}
func (g *game) randInt(n int) int {
if n <= 0 {
return 0
}
return g.rand.Intn(n)
}
| if g.Player.HP <= 0 {
if g.Wizard {
g.Player.HP = g.Player.HPMax()
g.PrintStyled("You died.", logSpecial)
g.StoryPrint("You died (wizard mode)")
} else {
g.LevelStats()
return true
}
}
return false
}
| identifier_body |
game.go | package main
import (
"log"
"math/rand"
"time"
"github.com/anaseto/gruid"
"github.com/anaseto/gruid/paths"
"github.com/anaseto/gruid/rl"
)
var Version string = "v0.5.0"
// game contains the game logic's state, without ui stuff. Everything could be
// in the model struct instead, with only the game logic's fiend exported, as
// some game functions need the model anyway (like animations), but this allows
// to differentiate a bit things that are mainly game-logic from the stuff that
// is more about ui.
type game struct {
Dungeon *dungeon
Player *player
Monsters []*monster
MonstersPosCache []int // monster (dungeon index + 1) / no monster (0)
Bands []bandInfo
Events *rl.EventQueue
EventIndex int
Depth int
ExploredLevels int
DepthPlayerTurn int
Turn int
Highlight map[gruid.Point]bool // highlighted positions (e.g. targeted ray)
Objects objects
Clouds map[gruid.Point]cloud
MagicalBarriers map[gruid.Point]cell
GeneratedLore map[int]bool
GeneratedMagaras []magaraKind
GeneratedCloaks []item
GeneratedAmulets []item
GenPlan [MaxDepth + 1]genFlavour
TerrainKnowledge map[gruid.Point]cell
ExclusionsMap map[gruid.Point]bool
Noise map[gruid.Point]bool
NoiseIllusion map[gruid.Point]bool
LastMonsterKnownAt map[gruid.Point]int
MonsterLOS map[gruid.Point]bool
MonsterTargLOS map[gruid.Point]bool
LightFOV *rl.FOV
RaysCache rayMap
Resting bool
RestingTurns int
Autoexploring bool
AutoexploreMapRebuild bool
AutoTarget gruid.Point
AutoDir gruid.Point
autoDirNeighbors dirNeighbors
autoDirChanged bool
AutoHalt bool
Log []logEntry
LogIndex int
LogNextTick int
InfoEntry string
Stats stats
Wizard bool
WizardMode wizardMode
Version string
Places places
Params startParams
//Opts startOpts
md *model // needed for animations and a few more cases
LiberatedShaedra bool
LiberatedArtifact bool
PlayerAgain bool
mfov *rl.FOV
PR *paths.PathRange
PRauto *paths.PathRange
autosources []gruid.Point // cache
nbs paths.Neighbors
rand *rand.Rand
}
type specialEvent int
const (
NormalLevel specialEvent = iota
UnstableLevel
EarthquakeLevel
MistLevel
)
const spEvMax = int(MistLevel)
type startParams struct {
Lore map[int]bool
Blocked map[int]bool
Special []specialRoom
Event map[int]specialEvent
Windows map[int]bool
Trees map[int]bool
Holes map[int]bool
Stones map[int]bool
Tables map[int]bool
NoMagara map[int]bool
FakeStair map[int]bool
ExtraBanana map[int]int
HealthPotion map[int]bool
MappingStone map[int]bool
CrazyImp int
}
type wizardMode int
const (
WizardNormal wizardMode = iota
WizardMap
WizardSeeAll
)
func (g *game) FreePassableCell() gruid.Point {
d := g.Dungeon
count := 0
for {
count++
if count > maxIterations {
panic("FreePassableCell")
}
x := RandInt(DungeonWidth)
y := RandInt(DungeonHeight)
p := gruid.Point{x, y}
c := d.Cell(p)
if !c.IsPassable() {
continue
}
if g.Player != nil && g.Player.P == p {
continue
}
mons := g.MonsterAt(p)
if mons.Exists() {
continue
}
return p
}
}
const MaxDepth = 11
const WinDepth = 8
const (
DungeonHeight = 21
DungeonWidth = 80
DungeonNCells = DungeonWidth * DungeonHeight
)
func (g *game) GenDungeon() {
ml := AutomataCave
switch g.Depth {
case 2, 6, 7:
ml = RandomWalkCave
if RandInt(3) == 0 {
ml = NaturalCave
}
case 4, 10, 11:
ml = RandomWalkTreeCave
if RandInt(4) == 0 && g.Depth < 11 {
ml = RandomSmallWalkCaveUrbanised
} else if g.Depth == 11 && RandInt(2) == 0 {
ml = RandomSmallWalkCaveUrbanised
}
case 9:
switch RandInt(4) {
case 0:
ml = NaturalCave
case 1:
ml = RandomWalkCave
}
default:
if RandInt(10) == 0 {
ml = RandomSmallWalkCaveUrbanised
} else if RandInt(10) == 0 {
ml = NaturalCave
}
}
g.GenRoomTunnels(ml)
}
func (g *game) InitPlayer() {
g.Player = &player{
HP: DefaultHealth,
MP: DefaultMPmax,
Bananas: 1,
}
g.Player.LOS = map[gruid.Point]bool{}
g.Player.Statuses = map[status]int{}
g.Player.Expire = map[status]int{}
g.Player.Magaras = []magara{
{},
{},
{},
{},
}
g.GeneratedMagaras = []magaraKind{}
g.Player.Magaras[0] = g.RandomStartingMagara()
g.GeneratedMagaras = append(g.GeneratedMagaras, g.Player.Magaras[0].Kind)
g.Player.Inventory.Misc = MarevorMagara
g.Player.FOV = rl.NewFOV(visionRange(g.Player.P, TreeRange))
// Testing
//g.Player.Magaras[1] = magara{Kind: DispersalMagara, Charges: 10}
//g.Player.Magaras[2] = magara{Kind: DelayedOricExplosionMagara, Charges: 10}
//g.Player.Magaras[2] = ConfusionMagara
}
type genFlavour int
const (
GenNothing genFlavour = iota
//GenWeapon
GenAmulet
GenCloak
)
func PutRandomLevels(m map[int]bool, n int) {
for i := 0; i < n; i++ {
j := 1 + RandInt(MaxDepth)
if !m[j] {
m[j] = true
} else {
i--
}
}
}
func (g *game) InitFirstLevel() {
g.Version = Version
g.Depth++ // start at 1
g.InitPlayer()
g.AutoTarget = invalidPos
g.RaysCache = rayMap{}
g.GeneratedLore = map[int]bool{}
g.Stats.KilledMons = map[monsterKind]int{}
g.Stats.UsedMagaras = map[magaraKind]int{}
g.Stats.Achievements = map[achievement]int{}
g.Stats.Lore = map[int]bool{}
g.Stats.Statuses = map[status]int{}
g.GenPlan = [MaxDepth + 1]genFlavour{
1: GenNothing,
2: GenCloak,
3: GenNothing,
4: GenAmulet,
5: GenNothing,
6: GenCloak,
7: GenNothing,
8: GenAmulet,
9: GenNothing,
10: GenCloak,
11: GenNothing,
}
g.Params.Lore = map[int]bool{}
PutRandomLevels(g.Params.Lore, 8)
g.Params.HealthPotion = map[int]bool{}
PutRandomLevels(g.Params.HealthPotion, 5)
g.Params.MappingStone = map[int]bool{}
PutRandomLevels(g.Params.MappingStone, 3)
g.Params.Blocked = map[int]bool{}
if RandInt(10) > 0 {
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
if RandInt(10) == 0 {
// a second one sometimes!
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
g.Params.Special = []specialRoom{
noSpecialRoom, // unused (depth 0)
noSpecialRoom,
noSpecialRoom,
roomMilfids,
roomCelmists,
roomVampires,
roomHarpies,
roomTreeMushrooms,
roomShaedra,
roomCelmists,
roomMirrorSpecters,
roomArtifact,
}
if RandInt(2) == 0 {
g.Params.Special[5] = roomNixes
}
if RandInt(4) == 0 |
if RandInt(4) == 0 {
if RandInt(2) == 0 {
g.Params.Special[3] = roomFrogs
} else {
g.Params.Special[7] = roomFrogs
}
}
if RandInt(4) == 0 {
g.Params.Special[10], g.Params.Special[5] = g.Params.Special[5], g.Params.Special[10]
}
if RandInt(4) == 0 {
g.Params.Special[6], g.Params.Special[7] = g.Params.Special[7], g.Params.Special[6]
}
if RandInt(4) == 0 {
g.Params.Special[3], g.Params.Special[4] = g.Params.Special[4], g.Params.Special[3]
}
g.Params.Event = map[int]specialEvent{}
for i := 0; i < 2; i++ {
g.Params.Event[2+5*i+RandInt(5)] = specialEvent(1 + RandInt(spEvMax))
}
g.Params.Event[2+RandInt(MaxDepth-1)] = NormalLevel
g.Params.FakeStair = map[int]bool{}
if RandInt(MaxDepth) > 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) == 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
}
}
}
g.Params.ExtraBanana = map[int]int{}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]++
}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]--
}
g.Params.Windows = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Holes = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Trees = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Tables = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.NoMagara = map[int]bool{}
g.Params.NoMagara[WinDepth] = true
g.Params.Stones = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
}
}
permi := RandInt(WinDepth - 1)
switch permi {
case 0, 1, 2, 3:
g.GenPlan[permi+1], g.GenPlan[permi+2] = g.GenPlan[permi+2], g.GenPlan[permi+1]
}
if RandInt(4) == 0 {
g.GenPlan[6], g.GenPlan[7] = g.GenPlan[7], g.GenPlan[6]
}
if RandInt(4) == 0 {
g.GenPlan[MaxDepth-1], g.GenPlan[MaxDepth] = g.GenPlan[MaxDepth], g.GenPlan[MaxDepth-1]
}
g.Params.CrazyImp = 2 + RandInt(MaxDepth-2)
g.PR = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
g.PRauto = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
}
func (g *game) InitLevelStructures() {
g.MonstersPosCache = make([]int, DungeonNCells)
g.Noise = map[gruid.Point]bool{}
g.TerrainKnowledge = map[gruid.Point]cell{}
g.ExclusionsMap = map[gruid.Point]bool{}
g.MagicalBarriers = map[gruid.Point]cell{}
g.LastMonsterKnownAt = map[gruid.Point]int{}
g.Objects.Magaras = map[gruid.Point]magara{}
g.Objects.Lore = map[gruid.Point]int{}
g.Objects.Items = map[gruid.Point]item{}
g.Objects.Scrolls = map[gruid.Point]scroll{}
g.Objects.Stairs = map[gruid.Point]stair{}
g.Objects.Bananas = make(map[gruid.Point]bool, 2)
g.Objects.Barrels = map[gruid.Point]bool{}
g.Objects.Lights = map[gruid.Point]bool{}
g.Objects.FakeStairs = map[gruid.Point]bool{}
g.Objects.Potions = map[gruid.Point]potion{}
g.NoiseIllusion = map[gruid.Point]bool{}
g.Clouds = map[gruid.Point]cloud{}
g.MonsterLOS = map[gruid.Point]bool{}
g.Stats.AtNotablePos = map[gruid.Point]bool{}
}
var Testing = false
func (g *game) InitLevel() {
if g.rand == nil {
g.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Starting data
if g.Depth == 0 {
g.InitFirstLevel()
}
g.InitLevelStructures()
// Dungeon terrain
g.GenDungeon()
// Events
if g.Depth == 1 {
g.StoryPrintf("Started with %s", g.Player.Magaras[0])
g.Events = rl.NewEventQueue()
//g.PushEvent(&simpleEvent{ERank: 0, EAction: PlayerTurn})
} else {
g.CleanEvents()
for st := range g.Player.Statuses {
if st.Clean() {
g.Player.Statuses[st] = 0
}
}
}
monsters := make([]*monster, len(g.Monsters))
copy(monsters, g.Monsters)
rand.Shuffle(len(monsters), func(i, j int) {
monsters[i], monsters[j] = monsters[j], monsters[i]
})
for _, m := range monsters {
g.PushEvent(&monsterTurnEvent{Index: m.Index}, g.Turn)
}
switch g.Params.Event[g.Depth] {
case UnstableLevel:
g.PrintStyled("Uncontrolled oric magic fills the air on this level.", logSpecial)
g.StoryPrint("Special event: magically unstable level")
for i := 0; i < 7; i++ {
g.PushEvent(&posEvent{Action: ObstructionProgression},
g.Turn+DurationObstructionProgression+RandInt(DurationObstructionProgression/2))
}
case MistLevel:
g.PrintStyled("The air seems dense on this level.", logSpecial)
g.StoryPrint("Special event: mist level")
for i := 0; i < 20; i++ {
g.PushEvent(&posEvent{Action: MistProgression},
g.Turn+DurationMistProgression+RandInt(DurationMistProgression/2))
}
case EarthquakeLevel:
g.PushEvent(&posEvent{P: gruid.Point{DungeonWidth/2 - 15 + RandInt(30), DungeonHeight/2 - 5 + RandInt(10)}, Action: Earthquake},
g.Turn+10+RandInt(50))
}
// initialize LOS
if g.Depth == 1 {
g.PrintStyled("► Press ? for help on keys or use the mouse and [buttons].", logSpecial)
}
if g.Depth == WinDepth {
g.PrintStyled("Finally! Shaedra should be imprisoned somewhere around here.", logSpecial)
} else if g.Depth == MaxDepth {
g.PrintStyled("This the bottom floor, you now have to look for the artifact.", logSpecial)
}
g.ComputeLOS()
g.MakeMonstersAware()
g.ComputeMonsterLOS()
if !Testing { // disable when testing
g.md.updateStatusInfo()
}
}
func (g *game) CleanEvents() {
g.Events.Filter(func(ev rl.Event) bool {
switch ev.(type) {
case *monsterTurnEvent, *posEvent, *monsterStatusEvent, *playerEvent:
return false
default:
// keep player statuses events
return true
}
})
// finish current turn's other effects (like status progression)
turn := g.Turn
for !g.Events.Empty() {
ev, r := g.Events.PopR()
if r == turn {
e, ok := ev.(event)
if ok {
e.Handle(g)
}
continue
}
g.Events.PushFirst(ev, r)
break
}
g.Turn++
}
func (g *game) StairsSlice() []gruid.Point {
stairs := []gruid.Point{}
it := g.Dungeon.Grid.Iterator()
for it.Next() {
c := cell(it.Cell())
if (terrain(c) != StairCell && terrain(c) != FakeStairCell) || !explored(c) {
continue
}
stairs = append(stairs, it.P())
}
return stairs
}
type descendstyle int
const (
DescendNormal descendstyle = iota
DescendJump
DescendFall
)
func (g *game) Descend(style descendstyle) bool {
g.LevelStats()
if g.Stats.DUSpotted[g.Depth] < 3 {
AchStealthNovice.Get(g)
}
if g.Depth >= 3 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 {
AchInsomniaNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 {
AchInsomniaInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 && g.Stats.DRests[g.Depth-4] == 0 && g.Stats.DRests[g.Depth-5] == 0 {
AchInsomniaMaster.Get(g)
}
}
if g.Depth >= 3 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 {
AchAntimagicNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 {
AchAntimagicInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 && g.Stats.DMagaraUses[g.Depth-4] == 0 && g.Stats.DMagaraUses[g.Depth-5] == 0 {
AchAntimagicMaster.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 {
AchStealthInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DUSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 &&
g.Stats.DSpotted[g.Depth-3] < 3 {
AchStealthMaster.Get(g)
}
}
c := g.Dungeon.Cell(g.Player.P)
if terrain(c) == StairCell && g.Objects.Stairs[g.Player.P] == WinStair {
g.StoryPrint("Escaped!")
g.ExploredLevels = g.Depth
g.Depth = -1
return true
}
if style != DescendNormal {
g.md.AbyssFallAnimation()
g.PrintStyled("You fall into the abyss. It hurts!", logDamage)
g.StoryPrint("Fell into the abyss")
} else {
g.Print("You descend deeper in the dungeon.")
g.StoryPrint("Descended stairs")
}
g.Depth++
g.DepthPlayerTurn = 0
g.InitLevel()
g.Save()
return false
}
func (g *game) EnterWizardMode() {
g.Wizard = true
g.PrintStyled("Wizard mode activated: winner status disabled.", logSpecial)
g.StoryPrint("Entered wizard mode.")
}
func (g *game) ApplyRest() {
g.Player.HP = g.Player.HPMax()
g.Player.HPbonus = 0
g.Player.MP = g.Player.MPMax()
g.Stats.Rest++
g.Stats.DRests[g.Depth]++
g.PrintStyled("You feel fresh again after eating banana and sleeping.", logStatusEnd)
g.StoryPrintf("Rested in barrel (bananas: %d)", g.Player.Bananas)
if g.Stats.Rest == 10 {
AchSleepy.Get(g)
}
}
func (g *game) AutoPlayer() bool {
switch {
case g.Resting:
const enoughRestTurns = 25
if g.RestingTurns < enoughRestTurns {
g.RestingTurns++
return true
}
if g.RestingTurns >= enoughRestTurns {
g.ApplyRest()
}
g.Resting = false
case g.Autoexploring:
switch {
case g.AutoHalt:
// stop exploring
default:
var n *gruid.Point
var finished bool
if g.AutoexploreMapRebuild {
if g.AllExplored() {
g.Print("You finished exploring.")
break
}
sources := g.AutoexploreSources()
g.BuildAutoexploreMap(sources)
}
n, finished = g.NextAuto()
if finished {
n = nil
}
if finished && g.AllExplored() {
g.Print("You finished exploring.")
} else if n == nil {
g.Print("You could not safely reach some places.")
}
if n != nil {
again, err := g.PlayerBump(*n)
if err != nil {
g.Print(err.Error())
break
}
return !again
}
}
g.Autoexploring = false
case valid(g.AutoTarget):
if g.MoveToTarget() {
return true
}
g.AutoTarget = invalidPos
case g.AutoDir != ZP:
if g.AutoToDir() {
return true
}
g.AutoDir = ZP
}
return false
}
func (g *game) Died() bool {
if g.Player.HP <= 0 {
if g.Wizard {
g.Player.HP = g.Player.HPMax()
g.PrintStyled("You died.", logSpecial)
g.StoryPrint("You died (wizard mode)")
} else {
g.LevelStats()
return true
}
}
return false
}
type msgAuto int
func (g *game) EndTurn() {
g.Events.Push(endTurnAction, g.Turn+DurationTurn)
for {
if g.Died() {
return
}
if g.Events.Empty() {
return
}
ev, r := g.Events.PopR()
g.Turn = r
switch ev := ev.(type) {
case endTurnEvent:
return
case event:
ev.Handle(g)
default:
log.Printf("bad event: %v", ev)
}
}
}
func (g *game) checks() {
if !Testing {
return
}
for _, m := range g.Monsters {
mons := g.MonsterAt(m.P)
if !mons.Exists() && m.Exists() {
log.Printf("does not exist")
continue
}
if mons != m {
log.Printf("bad monster: %v vs %v", mons.Index, m.Index)
}
}
}
func (g *game) randInt(n int) int {
if n <= 0 {
return 0
}
return g.rand.Intn(n)
}
| {
if g.Params.Special[5] == roomNixes {
g.Params.Special[9] = roomVampires
} else {
g.Params.Special[9] = roomNixes
}
} | conditional_block |
game.go | package main
import (
"log"
"math/rand"
"time"
"github.com/anaseto/gruid"
"github.com/anaseto/gruid/paths"
"github.com/anaseto/gruid/rl"
)
var Version string = "v0.5.0"
// game contains the game logic's state, without ui stuff. Everything could be
// in the model struct instead, with only the game logic's fiend exported, as
// some game functions need the model anyway (like animations), but this allows
// to differentiate a bit things that are mainly game-logic from the stuff that
// is more about ui.
type game struct {
Dungeon *dungeon
Player *player
Monsters []*monster
MonstersPosCache []int // monster (dungeon index + 1) / no monster (0)
Bands []bandInfo
Events *rl.EventQueue
EventIndex int
Depth int
ExploredLevels int
DepthPlayerTurn int
Turn int
Highlight map[gruid.Point]bool // highlighted positions (e.g. targeted ray)
Objects objects
Clouds map[gruid.Point]cloud
MagicalBarriers map[gruid.Point]cell
GeneratedLore map[int]bool
GeneratedMagaras []magaraKind
GeneratedCloaks []item
GeneratedAmulets []item
GenPlan [MaxDepth + 1]genFlavour
TerrainKnowledge map[gruid.Point]cell
ExclusionsMap map[gruid.Point]bool
Noise map[gruid.Point]bool
NoiseIllusion map[gruid.Point]bool
LastMonsterKnownAt map[gruid.Point]int
MonsterLOS map[gruid.Point]bool
MonsterTargLOS map[gruid.Point]bool
LightFOV *rl.FOV
RaysCache rayMap
Resting bool
RestingTurns int
Autoexploring bool
AutoexploreMapRebuild bool
AutoTarget gruid.Point
AutoDir gruid.Point
autoDirNeighbors dirNeighbors
autoDirChanged bool
AutoHalt bool
Log []logEntry
LogIndex int
LogNextTick int
InfoEntry string
Stats stats
Wizard bool
WizardMode wizardMode
Version string
Places places
Params startParams
//Opts startOpts
md *model // needed for animations and a few more cases
LiberatedShaedra bool
LiberatedArtifact bool
PlayerAgain bool
mfov *rl.FOV
PR *paths.PathRange
PRauto *paths.PathRange
autosources []gruid.Point // cache
nbs paths.Neighbors
rand *rand.Rand
}
type specialEvent int
const (
NormalLevel specialEvent = iota
UnstableLevel
EarthquakeLevel
MistLevel
)
const spEvMax = int(MistLevel)
type startParams struct {
Lore map[int]bool
Blocked map[int]bool
Special []specialRoom
Event map[int]specialEvent
Windows map[int]bool
Trees map[int]bool
Holes map[int]bool
Stones map[int]bool
Tables map[int]bool
NoMagara map[int]bool
FakeStair map[int]bool
ExtraBanana map[int]int
HealthPotion map[int]bool
MappingStone map[int]bool
CrazyImp int
}
type wizardMode int
const (
WizardNormal wizardMode = iota
WizardMap
WizardSeeAll
)
func (g *game) FreePassableCell() gruid.Point {
d := g.Dungeon
count := 0
for {
count++
if count > maxIterations {
panic("FreePassableCell")
}
x := RandInt(DungeonWidth)
y := RandInt(DungeonHeight)
p := gruid.Point{x, y}
c := d.Cell(p)
if !c.IsPassable() {
continue
}
if g.Player != nil && g.Player.P == p {
continue
}
mons := g.MonsterAt(p)
if mons.Exists() {
continue
}
return p
}
}
const MaxDepth = 11
const WinDepth = 8
const (
DungeonHeight = 21
DungeonWidth = 80
DungeonNCells = DungeonWidth * DungeonHeight
)
func (g *game) GenDungeon() {
ml := AutomataCave
switch g.Depth {
case 2, 6, 7:
ml = RandomWalkCave
if RandInt(3) == 0 {
ml = NaturalCave
}
case 4, 10, 11:
ml = RandomWalkTreeCave
if RandInt(4) == 0 && g.Depth < 11 {
ml = RandomSmallWalkCaveUrbanised
} else if g.Depth == 11 && RandInt(2) == 0 {
ml = RandomSmallWalkCaveUrbanised
}
case 9:
switch RandInt(4) {
case 0:
ml = NaturalCave
case 1:
ml = RandomWalkCave
}
default:
if RandInt(10) == 0 {
ml = RandomSmallWalkCaveUrbanised
} else if RandInt(10) == 0 {
ml = NaturalCave
}
}
g.GenRoomTunnels(ml)
}
func (g *game) InitPlayer() {
g.Player = &player{
HP: DefaultHealth,
MP: DefaultMPmax,
Bananas: 1,
}
g.Player.LOS = map[gruid.Point]bool{}
g.Player.Statuses = map[status]int{}
g.Player.Expire = map[status]int{}
g.Player.Magaras = []magara{
{},
{},
{},
{},
}
g.GeneratedMagaras = []magaraKind{}
g.Player.Magaras[0] = g.RandomStartingMagara()
g.GeneratedMagaras = append(g.GeneratedMagaras, g.Player.Magaras[0].Kind)
g.Player.Inventory.Misc = MarevorMagara
g.Player.FOV = rl.NewFOV(visionRange(g.Player.P, TreeRange))
// Testing
//g.Player.Magaras[1] = magara{Kind: DispersalMagara, Charges: 10}
//g.Player.Magaras[2] = magara{Kind: DelayedOricExplosionMagara, Charges: 10}
//g.Player.Magaras[2] = ConfusionMagara
}
type genFlavour int
const (
GenNothing genFlavour = iota
//GenWeapon
GenAmulet
GenCloak
)
func | (m map[int]bool, n int) {
for i := 0; i < n; i++ {
j := 1 + RandInt(MaxDepth)
if !m[j] {
m[j] = true
} else {
i--
}
}
}
func (g *game) InitFirstLevel() {
g.Version = Version
g.Depth++ // start at 1
g.InitPlayer()
g.AutoTarget = invalidPos
g.RaysCache = rayMap{}
g.GeneratedLore = map[int]bool{}
g.Stats.KilledMons = map[monsterKind]int{}
g.Stats.UsedMagaras = map[magaraKind]int{}
g.Stats.Achievements = map[achievement]int{}
g.Stats.Lore = map[int]bool{}
g.Stats.Statuses = map[status]int{}
g.GenPlan = [MaxDepth + 1]genFlavour{
1: GenNothing,
2: GenCloak,
3: GenNothing,
4: GenAmulet,
5: GenNothing,
6: GenCloak,
7: GenNothing,
8: GenAmulet,
9: GenNothing,
10: GenCloak,
11: GenNothing,
}
g.Params.Lore = map[int]bool{}
PutRandomLevels(g.Params.Lore, 8)
g.Params.HealthPotion = map[int]bool{}
PutRandomLevels(g.Params.HealthPotion, 5)
g.Params.MappingStone = map[int]bool{}
PutRandomLevels(g.Params.MappingStone, 3)
g.Params.Blocked = map[int]bool{}
if RandInt(10) > 0 {
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
if RandInt(10) == 0 {
// a second one sometimes!
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
g.Params.Special = []specialRoom{
noSpecialRoom, // unused (depth 0)
noSpecialRoom,
noSpecialRoom,
roomMilfids,
roomCelmists,
roomVampires,
roomHarpies,
roomTreeMushrooms,
roomShaedra,
roomCelmists,
roomMirrorSpecters,
roomArtifact,
}
if RandInt(2) == 0 {
g.Params.Special[5] = roomNixes
}
if RandInt(4) == 0 {
if g.Params.Special[5] == roomNixes {
g.Params.Special[9] = roomVampires
} else {
g.Params.Special[9] = roomNixes
}
}
if RandInt(4) == 0 {
if RandInt(2) == 0 {
g.Params.Special[3] = roomFrogs
} else {
g.Params.Special[7] = roomFrogs
}
}
if RandInt(4) == 0 {
g.Params.Special[10], g.Params.Special[5] = g.Params.Special[5], g.Params.Special[10]
}
if RandInt(4) == 0 {
g.Params.Special[6], g.Params.Special[7] = g.Params.Special[7], g.Params.Special[6]
}
if RandInt(4) == 0 {
g.Params.Special[3], g.Params.Special[4] = g.Params.Special[4], g.Params.Special[3]
}
g.Params.Event = map[int]specialEvent{}
for i := 0; i < 2; i++ {
g.Params.Event[2+5*i+RandInt(5)] = specialEvent(1 + RandInt(spEvMax))
}
g.Params.Event[2+RandInt(MaxDepth-1)] = NormalLevel
g.Params.FakeStair = map[int]bool{}
if RandInt(MaxDepth) > 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) == 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
}
}
}
g.Params.ExtraBanana = map[int]int{}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]++
}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]--
}
g.Params.Windows = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Holes = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Trees = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Tables = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.NoMagara = map[int]bool{}
g.Params.NoMagara[WinDepth] = true
g.Params.Stones = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
}
}
permi := RandInt(WinDepth - 1)
switch permi {
case 0, 1, 2, 3:
g.GenPlan[permi+1], g.GenPlan[permi+2] = g.GenPlan[permi+2], g.GenPlan[permi+1]
}
if RandInt(4) == 0 {
g.GenPlan[6], g.GenPlan[7] = g.GenPlan[7], g.GenPlan[6]
}
if RandInt(4) == 0 {
g.GenPlan[MaxDepth-1], g.GenPlan[MaxDepth] = g.GenPlan[MaxDepth], g.GenPlan[MaxDepth-1]
}
g.Params.CrazyImp = 2 + RandInt(MaxDepth-2)
g.PR = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
g.PRauto = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
}
func (g *game) InitLevelStructures() {
g.MonstersPosCache = make([]int, DungeonNCells)
g.Noise = map[gruid.Point]bool{}
g.TerrainKnowledge = map[gruid.Point]cell{}
g.ExclusionsMap = map[gruid.Point]bool{}
g.MagicalBarriers = map[gruid.Point]cell{}
g.LastMonsterKnownAt = map[gruid.Point]int{}
g.Objects.Magaras = map[gruid.Point]magara{}
g.Objects.Lore = map[gruid.Point]int{}
g.Objects.Items = map[gruid.Point]item{}
g.Objects.Scrolls = map[gruid.Point]scroll{}
g.Objects.Stairs = map[gruid.Point]stair{}
g.Objects.Bananas = make(map[gruid.Point]bool, 2)
g.Objects.Barrels = map[gruid.Point]bool{}
g.Objects.Lights = map[gruid.Point]bool{}
g.Objects.FakeStairs = map[gruid.Point]bool{}
g.Objects.Potions = map[gruid.Point]potion{}
g.NoiseIllusion = map[gruid.Point]bool{}
g.Clouds = map[gruid.Point]cloud{}
g.MonsterLOS = map[gruid.Point]bool{}
g.Stats.AtNotablePos = map[gruid.Point]bool{}
}
var Testing = false
func (g *game) InitLevel() {
if g.rand == nil {
g.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Starting data
if g.Depth == 0 {
g.InitFirstLevel()
}
g.InitLevelStructures()
// Dungeon terrain
g.GenDungeon()
// Events
if g.Depth == 1 {
g.StoryPrintf("Started with %s", g.Player.Magaras[0])
g.Events = rl.NewEventQueue()
//g.PushEvent(&simpleEvent{ERank: 0, EAction: PlayerTurn})
} else {
g.CleanEvents()
for st := range g.Player.Statuses {
if st.Clean() {
g.Player.Statuses[st] = 0
}
}
}
monsters := make([]*monster, len(g.Monsters))
copy(monsters, g.Monsters)
rand.Shuffle(len(monsters), func(i, j int) {
monsters[i], monsters[j] = monsters[j], monsters[i]
})
for _, m := range monsters {
g.PushEvent(&monsterTurnEvent{Index: m.Index}, g.Turn)
}
switch g.Params.Event[g.Depth] {
case UnstableLevel:
g.PrintStyled("Uncontrolled oric magic fills the air on this level.", logSpecial)
g.StoryPrint("Special event: magically unstable level")
for i := 0; i < 7; i++ {
g.PushEvent(&posEvent{Action: ObstructionProgression},
g.Turn+DurationObstructionProgression+RandInt(DurationObstructionProgression/2))
}
case MistLevel:
g.PrintStyled("The air seems dense on this level.", logSpecial)
g.StoryPrint("Special event: mist level")
for i := 0; i < 20; i++ {
g.PushEvent(&posEvent{Action: MistProgression},
g.Turn+DurationMistProgression+RandInt(DurationMistProgression/2))
}
case EarthquakeLevel:
g.PushEvent(&posEvent{P: gruid.Point{DungeonWidth/2 - 15 + RandInt(30), DungeonHeight/2 - 5 + RandInt(10)}, Action: Earthquake},
g.Turn+10+RandInt(50))
}
// initialize LOS
if g.Depth == 1 {
g.PrintStyled("► Press ? for help on keys or use the mouse and [buttons].", logSpecial)
}
if g.Depth == WinDepth {
g.PrintStyled("Finally! Shaedra should be imprisoned somewhere around here.", logSpecial)
} else if g.Depth == MaxDepth {
g.PrintStyled("This the bottom floor, you now have to look for the artifact.", logSpecial)
}
g.ComputeLOS()
g.MakeMonstersAware()
g.ComputeMonsterLOS()
if !Testing { // disable when testing
g.md.updateStatusInfo()
}
}
func (g *game) CleanEvents() {
g.Events.Filter(func(ev rl.Event) bool {
switch ev.(type) {
case *monsterTurnEvent, *posEvent, *monsterStatusEvent, *playerEvent:
return false
default:
// keep player statuses events
return true
}
})
// finish current turn's other effects (like status progression)
turn := g.Turn
for !g.Events.Empty() {
ev, r := g.Events.PopR()
if r == turn {
e, ok := ev.(event)
if ok {
e.Handle(g)
}
continue
}
g.Events.PushFirst(ev, r)
break
}
g.Turn++
}
func (g *game) StairsSlice() []gruid.Point {
stairs := []gruid.Point{}
it := g.Dungeon.Grid.Iterator()
for it.Next() {
c := cell(it.Cell())
if (terrain(c) != StairCell && terrain(c) != FakeStairCell) || !explored(c) {
continue
}
stairs = append(stairs, it.P())
}
return stairs
}
type descendstyle int
const (
DescendNormal descendstyle = iota
DescendJump
DescendFall
)
func (g *game) Descend(style descendstyle) bool {
g.LevelStats()
if g.Stats.DUSpotted[g.Depth] < 3 {
AchStealthNovice.Get(g)
}
if g.Depth >= 3 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 {
AchInsomniaNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 {
AchInsomniaInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 && g.Stats.DRests[g.Depth-4] == 0 && g.Stats.DRests[g.Depth-5] == 0 {
AchInsomniaMaster.Get(g)
}
}
if g.Depth >= 3 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 {
AchAntimagicNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 {
AchAntimagicInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 && g.Stats.DMagaraUses[g.Depth-4] == 0 && g.Stats.DMagaraUses[g.Depth-5] == 0 {
AchAntimagicMaster.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 {
AchStealthInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DUSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 &&
g.Stats.DSpotted[g.Depth-3] < 3 {
AchStealthMaster.Get(g)
}
}
c := g.Dungeon.Cell(g.Player.P)
if terrain(c) == StairCell && g.Objects.Stairs[g.Player.P] == WinStair {
g.StoryPrint("Escaped!")
g.ExploredLevels = g.Depth
g.Depth = -1
return true
}
if style != DescendNormal {
g.md.AbyssFallAnimation()
g.PrintStyled("You fall into the abyss. It hurts!", logDamage)
g.StoryPrint("Fell into the abyss")
} else {
g.Print("You descend deeper in the dungeon.")
g.StoryPrint("Descended stairs")
}
g.Depth++
g.DepthPlayerTurn = 0
g.InitLevel()
g.Save()
return false
}
func (g *game) EnterWizardMode() {
g.Wizard = true
g.PrintStyled("Wizard mode activated: winner status disabled.", logSpecial)
g.StoryPrint("Entered wizard mode.")
}
func (g *game) ApplyRest() {
g.Player.HP = g.Player.HPMax()
g.Player.HPbonus = 0
g.Player.MP = g.Player.MPMax()
g.Stats.Rest++
g.Stats.DRests[g.Depth]++
g.PrintStyled("You feel fresh again after eating banana and sleeping.", logStatusEnd)
g.StoryPrintf("Rested in barrel (bananas: %d)", g.Player.Bananas)
if g.Stats.Rest == 10 {
AchSleepy.Get(g)
}
}
func (g *game) AutoPlayer() bool {
switch {
case g.Resting:
const enoughRestTurns = 25
if g.RestingTurns < enoughRestTurns {
g.RestingTurns++
return true
}
if g.RestingTurns >= enoughRestTurns {
g.ApplyRest()
}
g.Resting = false
case g.Autoexploring:
switch {
case g.AutoHalt:
// stop exploring
default:
var n *gruid.Point
var finished bool
if g.AutoexploreMapRebuild {
if g.AllExplored() {
g.Print("You finished exploring.")
break
}
sources := g.AutoexploreSources()
g.BuildAutoexploreMap(sources)
}
n, finished = g.NextAuto()
if finished {
n = nil
}
if finished && g.AllExplored() {
g.Print("You finished exploring.")
} else if n == nil {
g.Print("You could not safely reach some places.")
}
if n != nil {
again, err := g.PlayerBump(*n)
if err != nil {
g.Print(err.Error())
break
}
return !again
}
}
g.Autoexploring = false
case valid(g.AutoTarget):
if g.MoveToTarget() {
return true
}
g.AutoTarget = invalidPos
case g.AutoDir != ZP:
if g.AutoToDir() {
return true
}
g.AutoDir = ZP
}
return false
}
func (g *game) Died() bool {
if g.Player.HP <= 0 {
if g.Wizard {
g.Player.HP = g.Player.HPMax()
g.PrintStyled("You died.", logSpecial)
g.StoryPrint("You died (wizard mode)")
} else {
g.LevelStats()
return true
}
}
return false
}
type msgAuto int
func (g *game) EndTurn() {
g.Events.Push(endTurnAction, g.Turn+DurationTurn)
for {
if g.Died() {
return
}
if g.Events.Empty() {
return
}
ev, r := g.Events.PopR()
g.Turn = r
switch ev := ev.(type) {
case endTurnEvent:
return
case event:
ev.Handle(g)
default:
log.Printf("bad event: %v", ev)
}
}
}
func (g *game) checks() {
if !Testing {
return
}
for _, m := range g.Monsters {
mons := g.MonsterAt(m.P)
if !mons.Exists() && m.Exists() {
log.Printf("does not exist")
continue
}
if mons != m {
log.Printf("bad monster: %v vs %v", mons.Index, m.Index)
}
}
}
func (g *game) randInt(n int) int {
if n <= 0 {
return 0
}
return g.rand.Intn(n)
}
| PutRandomLevels | identifier_name |
game.go | package main
import (
"log"
"math/rand"
"time"
"github.com/anaseto/gruid"
"github.com/anaseto/gruid/paths"
"github.com/anaseto/gruid/rl"
)
var Version string = "v0.5.0"
// game contains the game logic's state, without ui stuff. Everything could be
// in the model struct instead, with only the game logic's fiend exported, as
// some game functions need the model anyway (like animations), but this allows
// to differentiate a bit things that are mainly game-logic from the stuff that
// is more about ui.
type game struct {
Dungeon *dungeon
Player *player
Monsters []*monster
MonstersPosCache []int // monster (dungeon index + 1) / no monster (0)
Bands []bandInfo
Events *rl.EventQueue
EventIndex int
Depth int
ExploredLevels int
DepthPlayerTurn int
Turn int
Highlight map[gruid.Point]bool // highlighted positions (e.g. targeted ray)
Objects objects
Clouds map[gruid.Point]cloud
MagicalBarriers map[gruid.Point]cell
GeneratedLore map[int]bool
GeneratedMagaras []magaraKind
GeneratedCloaks []item
GeneratedAmulets []item
GenPlan [MaxDepth + 1]genFlavour
TerrainKnowledge map[gruid.Point]cell
ExclusionsMap map[gruid.Point]bool
Noise map[gruid.Point]bool
NoiseIllusion map[gruid.Point]bool
LastMonsterKnownAt map[gruid.Point]int
MonsterLOS map[gruid.Point]bool
MonsterTargLOS map[gruid.Point]bool
LightFOV *rl.FOV
RaysCache rayMap
Resting bool
RestingTurns int
Autoexploring bool
AutoexploreMapRebuild bool
AutoTarget gruid.Point
AutoDir gruid.Point
autoDirNeighbors dirNeighbors
autoDirChanged bool
AutoHalt bool
Log []logEntry
LogIndex int
LogNextTick int
InfoEntry string
Stats stats
Wizard bool
WizardMode wizardMode
Version string
Places places
Params startParams
//Opts startOpts
md *model // needed for animations and a few more cases
LiberatedShaedra bool
LiberatedArtifact bool
PlayerAgain bool
mfov *rl.FOV
PR *paths.PathRange
PRauto *paths.PathRange
autosources []gruid.Point // cache
nbs paths.Neighbors
rand *rand.Rand
}
type specialEvent int
const (
NormalLevel specialEvent = iota
UnstableLevel
EarthquakeLevel
MistLevel
)
const spEvMax = int(MistLevel)
type startParams struct {
Lore map[int]bool
Blocked map[int]bool
Special []specialRoom
Event map[int]specialEvent
Windows map[int]bool
Trees map[int]bool
Holes map[int]bool
Stones map[int]bool
Tables map[int]bool
NoMagara map[int]bool
FakeStair map[int]bool
ExtraBanana map[int]int
HealthPotion map[int]bool
MappingStone map[int]bool
CrazyImp int
}
type wizardMode int
const (
WizardNormal wizardMode = iota
WizardMap
WizardSeeAll
)
func (g *game) FreePassableCell() gruid.Point {
d := g.Dungeon
count := 0
for {
count++
if count > maxIterations {
panic("FreePassableCell")
}
x := RandInt(DungeonWidth)
y := RandInt(DungeonHeight)
p := gruid.Point{x, y}
c := d.Cell(p)
if !c.IsPassable() {
continue
}
if g.Player != nil && g.Player.P == p {
continue
}
mons := g.MonsterAt(p)
if mons.Exists() {
continue
}
return p
}
}
const MaxDepth = 11
const WinDepth = 8
const (
DungeonHeight = 21
DungeonWidth = 80
DungeonNCells = DungeonWidth * DungeonHeight
)
func (g *game) GenDungeon() {
ml := AutomataCave
switch g.Depth {
case 2, 6, 7:
ml = RandomWalkCave
if RandInt(3) == 0 {
ml = NaturalCave
}
case 4, 10, 11:
ml = RandomWalkTreeCave
if RandInt(4) == 0 && g.Depth < 11 {
ml = RandomSmallWalkCaveUrbanised
} else if g.Depth == 11 && RandInt(2) == 0 {
ml = RandomSmallWalkCaveUrbanised
}
case 9:
switch RandInt(4) {
case 0:
ml = NaturalCave
case 1:
ml = RandomWalkCave
}
default:
if RandInt(10) == 0 {
ml = RandomSmallWalkCaveUrbanised
} else if RandInt(10) == 0 {
ml = NaturalCave
}
}
g.GenRoomTunnels(ml)
}
func (g *game) InitPlayer() {
g.Player = &player{
HP: DefaultHealth,
MP: DefaultMPmax,
Bananas: 1,
}
g.Player.LOS = map[gruid.Point]bool{}
g.Player.Statuses = map[status]int{}
g.Player.Expire = map[status]int{}
g.Player.Magaras = []magara{
{},
{},
{},
{},
}
g.GeneratedMagaras = []magaraKind{}
g.Player.Magaras[0] = g.RandomStartingMagara()
g.GeneratedMagaras = append(g.GeneratedMagaras, g.Player.Magaras[0].Kind)
g.Player.Inventory.Misc = MarevorMagara
g.Player.FOV = rl.NewFOV(visionRange(g.Player.P, TreeRange))
// Testing
//g.Player.Magaras[1] = magara{Kind: DispersalMagara, Charges: 10}
//g.Player.Magaras[2] = magara{Kind: DelayedOricExplosionMagara, Charges: 10}
//g.Player.Magaras[2] = ConfusionMagara
}
type genFlavour int
const (
GenNothing genFlavour = iota
//GenWeapon
GenAmulet
GenCloak
)
func PutRandomLevels(m map[int]bool, n int) {
for i := 0; i < n; i++ {
j := 1 + RandInt(MaxDepth)
if !m[j] {
m[j] = true
} else {
i--
}
}
}
func (g *game) InitFirstLevel() {
g.Version = Version
g.Depth++ // start at 1
g.InitPlayer()
g.AutoTarget = invalidPos
g.RaysCache = rayMap{}
g.GeneratedLore = map[int]bool{}
g.Stats.KilledMons = map[monsterKind]int{}
g.Stats.UsedMagaras = map[magaraKind]int{}
g.Stats.Achievements = map[achievement]int{}
g.Stats.Lore = map[int]bool{}
g.Stats.Statuses = map[status]int{}
g.GenPlan = [MaxDepth + 1]genFlavour{
1: GenNothing,
2: GenCloak,
3: GenNothing,
4: GenAmulet,
5: GenNothing,
6: GenCloak,
7: GenNothing,
8: GenAmulet,
9: GenNothing,
10: GenCloak,
11: GenNothing,
}
g.Params.Lore = map[int]bool{}
PutRandomLevels(g.Params.Lore, 8)
g.Params.HealthPotion = map[int]bool{}
PutRandomLevels(g.Params.HealthPotion, 5)
g.Params.MappingStone = map[int]bool{}
PutRandomLevels(g.Params.MappingStone, 3)
g.Params.Blocked = map[int]bool{}
if RandInt(10) > 0 {
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
if RandInt(10) == 0 {
// a second one sometimes!
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
g.Params.Special = []specialRoom{
noSpecialRoom, // unused (depth 0)
noSpecialRoom,
noSpecialRoom,
roomMilfids,
roomCelmists,
roomVampires,
roomHarpies,
roomTreeMushrooms,
roomShaedra,
roomCelmists,
roomMirrorSpecters,
roomArtifact,
}
if RandInt(2) == 0 {
g.Params.Special[5] = roomNixes
}
if RandInt(4) == 0 {
if g.Params.Special[5] == roomNixes {
g.Params.Special[9] = roomVampires
} else {
g.Params.Special[9] = roomNixes
}
}
if RandInt(4) == 0 {
if RandInt(2) == 0 {
g.Params.Special[3] = roomFrogs
} else {
g.Params.Special[7] = roomFrogs
}
}
if RandInt(4) == 0 {
g.Params.Special[10], g.Params.Special[5] = g.Params.Special[5], g.Params.Special[10]
}
if RandInt(4) == 0 {
g.Params.Special[6], g.Params.Special[7] = g.Params.Special[7], g.Params.Special[6]
}
if RandInt(4) == 0 {
g.Params.Special[3], g.Params.Special[4] = g.Params.Special[4], g.Params.Special[3]
}
g.Params.Event = map[int]specialEvent{}
for i := 0; i < 2; i++ {
g.Params.Event[2+5*i+RandInt(5)] = specialEvent(1 + RandInt(spEvMax))
}
g.Params.Event[2+RandInt(MaxDepth-1)] = NormalLevel
g.Params.FakeStair = map[int]bool{}
if RandInt(MaxDepth) > 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) == 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
}
}
}
g.Params.ExtraBanana = map[int]int{}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]++
}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]--
}
g.Params.Windows = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Holes = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Trees = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Tables = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.NoMagara = map[int]bool{}
g.Params.NoMagara[WinDepth] = true
g.Params.Stones = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
}
}
permi := RandInt(WinDepth - 1)
switch permi {
case 0, 1, 2, 3:
g.GenPlan[permi+1], g.GenPlan[permi+2] = g.GenPlan[permi+2], g.GenPlan[permi+1]
}
if RandInt(4) == 0 { | }
if RandInt(4) == 0 {
g.GenPlan[MaxDepth-1], g.GenPlan[MaxDepth] = g.GenPlan[MaxDepth], g.GenPlan[MaxDepth-1]
}
g.Params.CrazyImp = 2 + RandInt(MaxDepth-2)
g.PR = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
g.PRauto = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
}
func (g *game) InitLevelStructures() {
g.MonstersPosCache = make([]int, DungeonNCells)
g.Noise = map[gruid.Point]bool{}
g.TerrainKnowledge = map[gruid.Point]cell{}
g.ExclusionsMap = map[gruid.Point]bool{}
g.MagicalBarriers = map[gruid.Point]cell{}
g.LastMonsterKnownAt = map[gruid.Point]int{}
g.Objects.Magaras = map[gruid.Point]magara{}
g.Objects.Lore = map[gruid.Point]int{}
g.Objects.Items = map[gruid.Point]item{}
g.Objects.Scrolls = map[gruid.Point]scroll{}
g.Objects.Stairs = map[gruid.Point]stair{}
g.Objects.Bananas = make(map[gruid.Point]bool, 2)
g.Objects.Barrels = map[gruid.Point]bool{}
g.Objects.Lights = map[gruid.Point]bool{}
g.Objects.FakeStairs = map[gruid.Point]bool{}
g.Objects.Potions = map[gruid.Point]potion{}
g.NoiseIllusion = map[gruid.Point]bool{}
g.Clouds = map[gruid.Point]cloud{}
g.MonsterLOS = map[gruid.Point]bool{}
g.Stats.AtNotablePos = map[gruid.Point]bool{}
}
var Testing = false
func (g *game) InitLevel() {
if g.rand == nil {
g.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Starting data
if g.Depth == 0 {
g.InitFirstLevel()
}
g.InitLevelStructures()
// Dungeon terrain
g.GenDungeon()
// Events
if g.Depth == 1 {
g.StoryPrintf("Started with %s", g.Player.Magaras[0])
g.Events = rl.NewEventQueue()
//g.PushEvent(&simpleEvent{ERank: 0, EAction: PlayerTurn})
} else {
g.CleanEvents()
for st := range g.Player.Statuses {
if st.Clean() {
g.Player.Statuses[st] = 0
}
}
}
monsters := make([]*monster, len(g.Monsters))
copy(monsters, g.Monsters)
rand.Shuffle(len(monsters), func(i, j int) {
monsters[i], monsters[j] = monsters[j], monsters[i]
})
for _, m := range monsters {
g.PushEvent(&monsterTurnEvent{Index: m.Index}, g.Turn)
}
switch g.Params.Event[g.Depth] {
case UnstableLevel:
g.PrintStyled("Uncontrolled oric magic fills the air on this level.", logSpecial)
g.StoryPrint("Special event: magically unstable level")
for i := 0; i < 7; i++ {
g.PushEvent(&posEvent{Action: ObstructionProgression},
g.Turn+DurationObstructionProgression+RandInt(DurationObstructionProgression/2))
}
case MistLevel:
g.PrintStyled("The air seems dense on this level.", logSpecial)
g.StoryPrint("Special event: mist level")
for i := 0; i < 20; i++ {
g.PushEvent(&posEvent{Action: MistProgression},
g.Turn+DurationMistProgression+RandInt(DurationMistProgression/2))
}
case EarthquakeLevel:
g.PushEvent(&posEvent{P: gruid.Point{DungeonWidth/2 - 15 + RandInt(30), DungeonHeight/2 - 5 + RandInt(10)}, Action: Earthquake},
g.Turn+10+RandInt(50))
}
// initialize LOS
if g.Depth == 1 {
g.PrintStyled("► Press ? for help on keys or use the mouse and [buttons].", logSpecial)
}
if g.Depth == WinDepth {
g.PrintStyled("Finally! Shaedra should be imprisoned somewhere around here.", logSpecial)
} else if g.Depth == MaxDepth {
g.PrintStyled("This the bottom floor, you now have to look for the artifact.", logSpecial)
}
g.ComputeLOS()
g.MakeMonstersAware()
g.ComputeMonsterLOS()
if !Testing { // disable when testing
g.md.updateStatusInfo()
}
}
func (g *game) CleanEvents() {
g.Events.Filter(func(ev rl.Event) bool {
switch ev.(type) {
case *monsterTurnEvent, *posEvent, *monsterStatusEvent, *playerEvent:
return false
default:
// keep player statuses events
return true
}
})
// finish current turn's other effects (like status progression)
turn := g.Turn
for !g.Events.Empty() {
ev, r := g.Events.PopR()
if r == turn {
e, ok := ev.(event)
if ok {
e.Handle(g)
}
continue
}
g.Events.PushFirst(ev, r)
break
}
g.Turn++
}
func (g *game) StairsSlice() []gruid.Point {
stairs := []gruid.Point{}
it := g.Dungeon.Grid.Iterator()
for it.Next() {
c := cell(it.Cell())
if (terrain(c) != StairCell && terrain(c) != FakeStairCell) || !explored(c) {
continue
}
stairs = append(stairs, it.P())
}
return stairs
}
type descendstyle int
const (
DescendNormal descendstyle = iota
DescendJump
DescendFall
)
func (g *game) Descend(style descendstyle) bool {
g.LevelStats()
if g.Stats.DUSpotted[g.Depth] < 3 {
AchStealthNovice.Get(g)
}
if g.Depth >= 3 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 {
AchInsomniaNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 {
AchInsomniaInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 && g.Stats.DRests[g.Depth-4] == 0 && g.Stats.DRests[g.Depth-5] == 0 {
AchInsomniaMaster.Get(g)
}
}
if g.Depth >= 3 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 {
AchAntimagicNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 {
AchAntimagicInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 && g.Stats.DMagaraUses[g.Depth-4] == 0 && g.Stats.DMagaraUses[g.Depth-5] == 0 {
AchAntimagicMaster.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 {
AchStealthInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DUSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 &&
g.Stats.DSpotted[g.Depth-3] < 3 {
AchStealthMaster.Get(g)
}
}
c := g.Dungeon.Cell(g.Player.P)
if terrain(c) == StairCell && g.Objects.Stairs[g.Player.P] == WinStair {
g.StoryPrint("Escaped!")
g.ExploredLevels = g.Depth
g.Depth = -1
return true
}
if style != DescendNormal {
g.md.AbyssFallAnimation()
g.PrintStyled("You fall into the abyss. It hurts!", logDamage)
g.StoryPrint("Fell into the abyss")
} else {
g.Print("You descend deeper in the dungeon.")
g.StoryPrint("Descended stairs")
}
g.Depth++
g.DepthPlayerTurn = 0
g.InitLevel()
g.Save()
return false
}
func (g *game) EnterWizardMode() {
g.Wizard = true
g.PrintStyled("Wizard mode activated: winner status disabled.", logSpecial)
g.StoryPrint("Entered wizard mode.")
}
func (g *game) ApplyRest() {
g.Player.HP = g.Player.HPMax()
g.Player.HPbonus = 0
g.Player.MP = g.Player.MPMax()
g.Stats.Rest++
g.Stats.DRests[g.Depth]++
g.PrintStyled("You feel fresh again after eating banana and sleeping.", logStatusEnd)
g.StoryPrintf("Rested in barrel (bananas: %d)", g.Player.Bananas)
if g.Stats.Rest == 10 {
AchSleepy.Get(g)
}
}
func (g *game) AutoPlayer() bool {
switch {
case g.Resting:
const enoughRestTurns = 25
if g.RestingTurns < enoughRestTurns {
g.RestingTurns++
return true
}
if g.RestingTurns >= enoughRestTurns {
g.ApplyRest()
}
g.Resting = false
case g.Autoexploring:
switch {
case g.AutoHalt:
// stop exploring
default:
var n *gruid.Point
var finished bool
if g.AutoexploreMapRebuild {
if g.AllExplored() {
g.Print("You finished exploring.")
break
}
sources := g.AutoexploreSources()
g.BuildAutoexploreMap(sources)
}
n, finished = g.NextAuto()
if finished {
n = nil
}
if finished && g.AllExplored() {
g.Print("You finished exploring.")
} else if n == nil {
g.Print("You could not safely reach some places.")
}
if n != nil {
again, err := g.PlayerBump(*n)
if err != nil {
g.Print(err.Error())
break
}
return !again
}
}
g.Autoexploring = false
case valid(g.AutoTarget):
if g.MoveToTarget() {
return true
}
g.AutoTarget = invalidPos
case g.AutoDir != ZP:
if g.AutoToDir() {
return true
}
g.AutoDir = ZP
}
return false
}
func (g *game) Died() bool {
if g.Player.HP <= 0 {
if g.Wizard {
g.Player.HP = g.Player.HPMax()
g.PrintStyled("You died.", logSpecial)
g.StoryPrint("You died (wizard mode)")
} else {
g.LevelStats()
return true
}
}
return false
}
type msgAuto int
func (g *game) EndTurn() {
g.Events.Push(endTurnAction, g.Turn+DurationTurn)
for {
if g.Died() {
return
}
if g.Events.Empty() {
return
}
ev, r := g.Events.PopR()
g.Turn = r
switch ev := ev.(type) {
case endTurnEvent:
return
case event:
ev.Handle(g)
default:
log.Printf("bad event: %v", ev)
}
}
}
func (g *game) checks() {
if !Testing {
return
}
for _, m := range g.Monsters {
mons := g.MonsterAt(m.P)
if !mons.Exists() && m.Exists() {
log.Printf("does not exist")
continue
}
if mons != m {
log.Printf("bad monster: %v vs %v", mons.Index, m.Index)
}
}
}
func (g *game) randInt(n int) int {
if n <= 0 {
return 0
}
return g.rand.Intn(n)
} | g.GenPlan[6], g.GenPlan[7] = g.GenPlan[7], g.GenPlan[6] | random_line_split |
gogl.go | package goglbackend
import (
"fmt"
"github.com/tfriedel6/canvas/backend/backendbase"
"github.com/tfriedel6/canvas/backend/goglbackend/gl"
)
const alphaTexSize = 2048
var zeroes [alphaTexSize]byte
// GLContext is a context that contains all the
// shaders and buffers necessary for rendering
type GLContext struct {
buf uint32
shadowBuf uint32
alphaTex uint32
shd unifiedShader
offscr1 offscreenBuffer
offscr2 offscreenBuffer
imageBufTex uint32
imageBuf []byte
ptsBuf []float32
}
// NewGLContext creates all the necessary GL resources,
// like shaders and buffers
func NewGLContext() (*GLContext, error) {
ctx := &GLContext{
ptsBuf: make([]float32, 0, 4096),
}
err := gl.Init()
if err != nil {
return nil, err
}
gl.GetError() // clear error state
err = loadShader(unifiedVS, unifiedFS, &ctx.shd.shaderProgram)
if err != nil {
return nil, err
}
ctx.shd.shaderProgram.mustLoadLocations(&ctx.shd)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.buf)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.shadowBuf)
if err = glError(); err != nil {
return nil, err
}
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &ctx.alphaTex)
gl.BindTexture(gl.TEXTURE_2D, ctx.alphaTex)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.ALPHA, alphaTexSize, alphaTexSize, 0, gl.ALPHA, gl.UNSIGNED_BYTE, nil)
// todo should use gl.RED on OpenGL, gl.ALPHA on OpenGL ES
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.Enable(gl.STENCIL_TEST)
gl.StencilMask(0xFF)
gl.Clear(gl.STENCIL_BUFFER_BIT)
gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
gl.StencilFunc(gl.EQUAL, 0, 0xFF)
gl.Disable(gl.SCISSOR_TEST)
return ctx, nil
}
// GoGLBackend is a canvas backend using Go-GL
type GoGLBackend struct {
x, y, w, h int
fx, fy, fw, fh float64
*GLContext
activateFn func()
disableTextureRenderTarget func()
}
type offscreenBuffer struct {
tex uint32
w int
h int
renderStencilBuf uint32
frameBuf uint32
alpha bool
}
// New returns a new canvas backend. x, y, w, h define the target
// rectangle in the window. ctx is a GLContext created with
// NewGLContext, but can be nil for a default one. It makes sense
// to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func New(x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {
if ctx == nil {
var err error
ctx, err = NewGLContext()
if err != nil {
return nil, err
}
}
b := &GoGLBackend{
w: w,
h: h,
fw: float64(w),
fh: float64(h),
GLContext: ctx,
}
b.activateFn = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
// todo reapply clipping since another application may have used the stencil buffer
}
b.disableTextureRenderTarget = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
}
return b, nil
}
// GoGLBackendOffscreen is a canvas backend using an offscreen
// texture
type GoGLBackendOffscreen struct {
GoGLBackend
TextureID uint32
offscrBuf offscreenBuffer
offscrImg Image
}
// NewOffscreen returns a new offscreen canvas backend. w, h define
// the size of the offscreen texture. ctx is a GLContext created
// with NewGLContext, but can be nil for a default one. It makes
// sense to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func NewOffscreen(w, h int, alpha bool, ctx *GLContext) (*GoGLBackendOffscreen, error) |
// SetBounds updates the bounds of the canvas. This would
// usually be called for example when the window is resized
func (b *GoGLBackend) SetBounds(x, y, w, h int) {
b.x, b.y = x, y
b.fx, b.fy = float64(x), float64(y)
b.w, b.h = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
}
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if ip := style.ImagePattern; ip != nil {
ipd := ip.(*ImagePattern).data
img := ipd.Image.(*Image)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, img.tex)
gl.Uniform2f(b.shd.ImageSize, float32(img.w), float32(img.h))
gl.Uniform1i(b.shd.Image, 0)
var f32mat [9]float32
for i, v := range ipd.Transform {
f32mat[i] = float32(v)
}
gl.UniformMatrix3fv(b.shd.ImageTransform, 1, false, &f32mat[0])
switch ipd.Repeat {
case backendbase.Repeat:
gl.Uniform2f(b.shd.Repeat, 1, 1)
case backendbase.RepeatX:
gl.Uniform2f(b.shd.Repeat, 1, 0)
case backendbase.RepeatY:
gl.Uniform2f(b.shd.Repeat, 0, 1)
case backendbase.NoRepeat:
gl.Uniform2f(b.shd.Repeat, 0, 0)
}
gl.Uniform1i(b.shd.Func, shdFuncImagePattern)
return b.shd.Vertex, b.shd.TexCoord
}
cr := float32(style.Color.R) / 255
cg := float32(style.Color.G) / 255
cb := float32(style.Color.B) / 255
ca := float32(style.Color.A) / 255
gl.Uniform4f(b.shd.Color, cr, cg, cb, ca)
gl.Uniform1f(b.shd.GlobalAlpha, 1)
gl.Uniform1i(b.shd.Func, shdFuncSolid)
return b.shd.Vertex, b.shd.TexCoord
}
func (b *GoGLBackend) enableTextureRenderTarget(offscr *offscreenBuffer) {
if offscr.w == b.w && offscr.h == b.h {
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
return
}
if b.w == 0 || b.h == 0 {
return
}
if offscr.w != 0 && offscr.h != 0 {
gl.DeleteTextures(1, &offscr.tex)
gl.DeleteFramebuffers(1, &offscr.frameBuf)
gl.DeleteRenderbuffers(1, &offscr.renderStencilBuf)
}
offscr.w = b.w
offscr.h = b.h
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &offscr.tex)
gl.BindTexture(gl.TEXTURE_2D, offscr.tex)
// todo do non-power-of-two textures work everywhere?
if offscr.alpha {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, int32(b.w), int32(b.h), 0, gl.RGBA, gl.UNSIGNED_BYTE, nil)
} else {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, int32(b.w), int32(b.h), 0, gl.RGB, gl.UNSIGNED_BYTE, nil)
}
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.GenFramebuffers(1, &offscr.frameBuf)
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
gl.GenRenderbuffers(1, &offscr.renderStencilBuf)
gl.BindRenderbuffer(gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.RenderbufferStorage(gl.RENDERBUFFER, gl.STENCIL_INDEX8, int32(b.w), int32(b.h))
gl.FramebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, offscr.tex, 0)
if err := gl.CheckFramebufferStatus(gl.FRAMEBUFFER); err != gl.FRAMEBUFFER_COMPLETE {
// todo this should maybe not panic
panic(fmt.Sprintf("Failed to set up framebuffer for offscreen texture: %x", err))
}
gl.Clear(gl.COLOR_BUFFER_BIT | gl.STENCIL_BUFFER_BIT)
}
func mat3(m backendbase.Mat) (m3 [9]float32) {
m3[0] = float32(m[0])
m3[1] = float32(m[1])
m3[2] = 0
m3[3] = float32(m[2])
m3[4] = float32(m[3])
m3[5] = 0
m3[6] = float32(m[4])
m3[7] = float32(m[5])
m3[8] = 1
return
}
var mat3identity = [9]float32{1, 0, 0, 0, 1, 0, 0, 0, 1}
| {
b, err := New(0, 0, w, h, ctx)
if err != nil {
return nil, err
}
bo := &GoGLBackendOffscreen{GoGLBackend: *b}
bo.offscrBuf.alpha = alpha
bo.offscrImg.flip = true
bo.activateFn = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
gl.Viewport(0, 0, int32(bo.w), int32(bo.h))
bo.offscrImg.w = bo.offscrBuf.w
bo.offscrImg.h = bo.offscrBuf.h
bo.offscrImg.tex = bo.offscrBuf.tex
bo.TextureID = bo.offscrBuf.tex
}
bo.disableTextureRenderTarget = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
}
return bo, nil
} | identifier_body |
gogl.go | package goglbackend
import (
"fmt"
"github.com/tfriedel6/canvas/backend/backendbase"
"github.com/tfriedel6/canvas/backend/goglbackend/gl"
)
const alphaTexSize = 2048
var zeroes [alphaTexSize]byte
// GLContext is a context that contains all the
// shaders and buffers necessary for rendering
type GLContext struct {
buf uint32
shadowBuf uint32
alphaTex uint32
shd unifiedShader
offscr1 offscreenBuffer
offscr2 offscreenBuffer
imageBufTex uint32
imageBuf []byte
ptsBuf []float32
}
// NewGLContext creates all the necessary GL resources,
// like shaders and buffers
func NewGLContext() (*GLContext, error) {
ctx := &GLContext{
ptsBuf: make([]float32, 0, 4096),
}
err := gl.Init()
if err != nil {
return nil, err
}
gl.GetError() // clear error state
err = loadShader(unifiedVS, unifiedFS, &ctx.shd.shaderProgram)
if err != nil {
return nil, err
}
ctx.shd.shaderProgram.mustLoadLocations(&ctx.shd)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.buf)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.shadowBuf)
if err = glError(); err != nil {
return nil, err
}
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &ctx.alphaTex)
gl.BindTexture(gl.TEXTURE_2D, ctx.alphaTex)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.ALPHA, alphaTexSize, alphaTexSize, 0, gl.ALPHA, gl.UNSIGNED_BYTE, nil)
// todo should use gl.RED on OpenGL, gl.ALPHA on OpenGL ES
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.Enable(gl.STENCIL_TEST)
gl.StencilMask(0xFF)
gl.Clear(gl.STENCIL_BUFFER_BIT)
gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
gl.StencilFunc(gl.EQUAL, 0, 0xFF)
gl.Disable(gl.SCISSOR_TEST)
return ctx, nil
}
// GoGLBackend is a canvas backend using Go-GL
type GoGLBackend struct {
x, y, w, h int
fx, fy, fw, fh float64
*GLContext
activateFn func()
disableTextureRenderTarget func()
}
type offscreenBuffer struct {
tex uint32
w int
h int
renderStencilBuf uint32
frameBuf uint32
alpha bool
}
// New returns a new canvas backend. x, y, w, h define the target
// rectangle in the window. ctx is a GLContext created with
// NewGLContext, but can be nil for a default one. It makes sense
// to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func New(x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {
if ctx == nil {
var err error
ctx, err = NewGLContext()
if err != nil {
return nil, err
}
}
b := &GoGLBackend{
w: w,
h: h,
fw: float64(w),
fh: float64(h),
GLContext: ctx,
}
b.activateFn = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
// todo reapply clipping since another application may have used the stencil buffer
}
b.disableTextureRenderTarget = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
}
return b, nil
}
// GoGLBackendOffscreen is a canvas backend using an offscreen
// texture
type GoGLBackendOffscreen struct {
GoGLBackend
TextureID uint32
offscrBuf offscreenBuffer
offscrImg Image
}
// NewOffscreen returns a new offscreen canvas backend. w, h define
// the size of the offscreen texture. ctx is a GLContext created
// with NewGLContext, but can be nil for a default one. It makes
// sense to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func NewOffscreen(w, h int, alpha bool, ctx *GLContext) (*GoGLBackendOffscreen, error) {
b, err := New(0, 0, w, h, ctx)
if err != nil {
return nil, err
}
bo := &GoGLBackendOffscreen{GoGLBackend: *b}
bo.offscrBuf.alpha = alpha
bo.offscrImg.flip = true
bo.activateFn = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
gl.Viewport(0, 0, int32(bo.w), int32(bo.h))
bo.offscrImg.w = bo.offscrBuf.w
bo.offscrImg.h = bo.offscrBuf.h
bo.offscrImg.tex = bo.offscrBuf.tex
bo.TextureID = bo.offscrBuf.tex
}
bo.disableTextureRenderTarget = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
}
return bo, nil
}
// SetBounds updates the bounds of the canvas. This would
// usually be called for example when the window is resized
func (b *GoGLBackend) SetBounds(x, y, w, h int) {
b.x, b.y = x, y
b.fx, b.fy = float64(x), float64(y)
b.w, b.h = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext |
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if ip := style.ImagePattern; ip != nil {
ipd := ip.(*ImagePattern).data
img := ipd.Image.(*Image)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, img.tex)
gl.Uniform2f(b.shd.ImageSize, float32(img.w), float32(img.h))
gl.Uniform1i(b.shd.Image, 0)
var f32mat [9]float32
for i, v := range ipd.Transform {
f32mat[i] = float32(v)
}
gl.UniformMatrix3fv(b.shd.ImageTransform, 1, false, &f32mat[0])
switch ipd.Repeat {
case backendbase.Repeat:
gl.Uniform2f(b.shd.Repeat, 1, 1)
case backendbase.RepeatX:
gl.Uniform2f(b.shd.Repeat, 1, 0)
case backendbase.RepeatY:
gl.Uniform2f(b.shd.Repeat, 0, 1)
case backendbase.NoRepeat:
gl.Uniform2f(b.shd.Repeat, 0, 0)
}
gl.Uniform1i(b.shd.Func, shdFuncImagePattern)
return b.shd.Vertex, b.shd.TexCoord
}
cr := float32(style.Color.R) / 255
cg := float32(style.Color.G) / 255
cb := float32(style.Color.B) / 255
ca := float32(style.Color.A) / 255
gl.Uniform4f(b.shd.Color, cr, cg, cb, ca)
gl.Uniform1f(b.shd.GlobalAlpha, 1)
gl.Uniform1i(b.shd.Func, shdFuncSolid)
return b.shd.Vertex, b.shd.TexCoord
}
func (b *GoGLBackend) enableTextureRenderTarget(offscr *offscreenBuffer) {
if offscr.w == b.w && offscr.h == b.h {
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
return
}
if b.w == 0 || b.h == 0 {
return
}
if offscr.w != 0 && offscr.h != 0 {
gl.DeleteTextures(1, &offscr.tex)
gl.DeleteFramebuffers(1, &offscr.frameBuf)
gl.DeleteRenderbuffers(1, &offscr.renderStencilBuf)
}
offscr.w = b.w
offscr.h = b.h
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &offscr.tex)
gl.BindTexture(gl.TEXTURE_2D, offscr.tex)
// todo do non-power-of-two textures work everywhere?
if offscr.alpha {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, int32(b.w), int32(b.h), 0, gl.RGBA, gl.UNSIGNED_BYTE, nil)
} else {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, int32(b.w), int32(b.h), 0, gl.RGB, gl.UNSIGNED_BYTE, nil)
}
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.GenFramebuffers(1, &offscr.frameBuf)
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
gl.GenRenderbuffers(1, &offscr.renderStencilBuf)
gl.BindRenderbuffer(gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.RenderbufferStorage(gl.RENDERBUFFER, gl.STENCIL_INDEX8, int32(b.w), int32(b.h))
gl.FramebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, offscr.tex, 0)
if err := gl.CheckFramebufferStatus(gl.FRAMEBUFFER); err != gl.FRAMEBUFFER_COMPLETE {
// todo this should maybe not panic
panic(fmt.Sprintf("Failed to set up framebuffer for offscreen texture: %x", err))
}
gl.Clear(gl.COLOR_BUFFER_BIT | gl.STENCIL_BUFFER_BIT)
}
func mat3(m backendbase.Mat) (m3 [9]float32) {
m3[0] = float32(m[0])
m3[1] = float32(m[1])
m3[2] = 0
m3[3] = float32(m[2])
m3[4] = float32(m[3])
m3[5] = 0
m3[6] = float32(m[4])
m3[7] = float32(m[5])
m3[8] = 1
return
}
var mat3identity = [9]float32{1, 0, 0, 0, 1, 0, 0, 0, 1}
| {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
} | conditional_block |
gogl.go | package goglbackend
import (
"fmt"
"github.com/tfriedel6/canvas/backend/backendbase"
"github.com/tfriedel6/canvas/backend/goglbackend/gl"
)
const alphaTexSize = 2048
var zeroes [alphaTexSize]byte
// GLContext is a context that contains all the
// shaders and buffers necessary for rendering
type GLContext struct {
buf uint32
shadowBuf uint32
alphaTex uint32
shd unifiedShader
offscr1 offscreenBuffer
offscr2 offscreenBuffer
imageBufTex uint32
imageBuf []byte
ptsBuf []float32
}
// NewGLContext creates all the necessary GL resources,
// like shaders and buffers
func NewGLContext() (*GLContext, error) {
ctx := &GLContext{
ptsBuf: make([]float32, 0, 4096),
}
err := gl.Init()
if err != nil {
return nil, err
}
gl.GetError() // clear error state
err = loadShader(unifiedVS, unifiedFS, &ctx.shd.shaderProgram)
if err != nil {
return nil, err
}
ctx.shd.shaderProgram.mustLoadLocations(&ctx.shd)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.buf)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.shadowBuf)
if err = glError(); err != nil {
return nil, err
}
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &ctx.alphaTex)
gl.BindTexture(gl.TEXTURE_2D, ctx.alphaTex)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.ALPHA, alphaTexSize, alphaTexSize, 0, gl.ALPHA, gl.UNSIGNED_BYTE, nil)
// todo should use gl.RED on OpenGL, gl.ALPHA on OpenGL ES
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.Enable(gl.STENCIL_TEST)
gl.StencilMask(0xFF)
gl.Clear(gl.STENCIL_BUFFER_BIT)
gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
gl.StencilFunc(gl.EQUAL, 0, 0xFF)
gl.Disable(gl.SCISSOR_TEST)
return ctx, nil
}
// GoGLBackend is a canvas backend using Go-GL
type GoGLBackend struct {
x, y, w, h int
fx, fy, fw, fh float64
*GLContext
activateFn func()
disableTextureRenderTarget func()
}
type offscreenBuffer struct {
tex uint32
w int
h int
renderStencilBuf uint32
frameBuf uint32
alpha bool
}
// New returns a new canvas backend. x, y, w, h define the target
// rectangle in the window. ctx is a GLContext created with
// NewGLContext, but can be nil for a default one. It makes sense
// to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func | (x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {
if ctx == nil {
var err error
ctx, err = NewGLContext()
if err != nil {
return nil, err
}
}
b := &GoGLBackend{
w: w,
h: h,
fw: float64(w),
fh: float64(h),
GLContext: ctx,
}
b.activateFn = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
// todo reapply clipping since another application may have used the stencil buffer
}
b.disableTextureRenderTarget = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
}
return b, nil
}
// GoGLBackendOffscreen is a canvas backend using an offscreen
// texture
type GoGLBackendOffscreen struct {
GoGLBackend
TextureID uint32
offscrBuf offscreenBuffer
offscrImg Image
}
// NewOffscreen returns a new offscreen canvas backend. w, h define
// the size of the offscreen texture. ctx is a GLContext created
// with NewGLContext, but can be nil for a default one. It makes
// sense to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func NewOffscreen(w, h int, alpha bool, ctx *GLContext) (*GoGLBackendOffscreen, error) {
b, err := New(0, 0, w, h, ctx)
if err != nil {
return nil, err
}
bo := &GoGLBackendOffscreen{GoGLBackend: *b}
bo.offscrBuf.alpha = alpha
bo.offscrImg.flip = true
bo.activateFn = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
gl.Viewport(0, 0, int32(bo.w), int32(bo.h))
bo.offscrImg.w = bo.offscrBuf.w
bo.offscrImg.h = bo.offscrBuf.h
bo.offscrImg.tex = bo.offscrBuf.tex
bo.TextureID = bo.offscrBuf.tex
}
bo.disableTextureRenderTarget = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
}
return bo, nil
}
// SetBounds updates the bounds of the canvas. This would
// usually be called for example when the window is resized
func (b *GoGLBackend) SetBounds(x, y, w, h int) {
b.x, b.y = x, y
b.fx, b.fy = float64(x), float64(y)
b.w, b.h = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
}
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if ip := style.ImagePattern; ip != nil {
ipd := ip.(*ImagePattern).data
img := ipd.Image.(*Image)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, img.tex)
gl.Uniform2f(b.shd.ImageSize, float32(img.w), float32(img.h))
gl.Uniform1i(b.shd.Image, 0)
var f32mat [9]float32
for i, v := range ipd.Transform {
f32mat[i] = float32(v)
}
gl.UniformMatrix3fv(b.shd.ImageTransform, 1, false, &f32mat[0])
switch ipd.Repeat {
case backendbase.Repeat:
gl.Uniform2f(b.shd.Repeat, 1, 1)
case backendbase.RepeatX:
gl.Uniform2f(b.shd.Repeat, 1, 0)
case backendbase.RepeatY:
gl.Uniform2f(b.shd.Repeat, 0, 1)
case backendbase.NoRepeat:
gl.Uniform2f(b.shd.Repeat, 0, 0)
}
gl.Uniform1i(b.shd.Func, shdFuncImagePattern)
return b.shd.Vertex, b.shd.TexCoord
}
cr := float32(style.Color.R) / 255
cg := float32(style.Color.G) / 255
cb := float32(style.Color.B) / 255
ca := float32(style.Color.A) / 255
gl.Uniform4f(b.shd.Color, cr, cg, cb, ca)
gl.Uniform1f(b.shd.GlobalAlpha, 1)
gl.Uniform1i(b.shd.Func, shdFuncSolid)
return b.shd.Vertex, b.shd.TexCoord
}
func (b *GoGLBackend) enableTextureRenderTarget(offscr *offscreenBuffer) {
if offscr.w == b.w && offscr.h == b.h {
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
return
}
if b.w == 0 || b.h == 0 {
return
}
if offscr.w != 0 && offscr.h != 0 {
gl.DeleteTextures(1, &offscr.tex)
gl.DeleteFramebuffers(1, &offscr.frameBuf)
gl.DeleteRenderbuffers(1, &offscr.renderStencilBuf)
}
offscr.w = b.w
offscr.h = b.h
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &offscr.tex)
gl.BindTexture(gl.TEXTURE_2D, offscr.tex)
// todo do non-power-of-two textures work everywhere?
if offscr.alpha {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, int32(b.w), int32(b.h), 0, gl.RGBA, gl.UNSIGNED_BYTE, nil)
} else {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, int32(b.w), int32(b.h), 0, gl.RGB, gl.UNSIGNED_BYTE, nil)
}
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.GenFramebuffers(1, &offscr.frameBuf)
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
gl.GenRenderbuffers(1, &offscr.renderStencilBuf)
gl.BindRenderbuffer(gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.RenderbufferStorage(gl.RENDERBUFFER, gl.STENCIL_INDEX8, int32(b.w), int32(b.h))
gl.FramebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, offscr.tex, 0)
if err := gl.CheckFramebufferStatus(gl.FRAMEBUFFER); err != gl.FRAMEBUFFER_COMPLETE {
// todo this should maybe not panic
panic(fmt.Sprintf("Failed to set up framebuffer for offscreen texture: %x", err))
}
gl.Clear(gl.COLOR_BUFFER_BIT | gl.STENCIL_BUFFER_BIT)
}
func mat3(m backendbase.Mat) (m3 [9]float32) {
m3[0] = float32(m[0])
m3[1] = float32(m[1])
m3[2] = 0
m3[3] = float32(m[2])
m3[4] = float32(m[3])
m3[5] = 0
m3[6] = float32(m[4])
m3[7] = float32(m[5])
m3[8] = 1
return
}
var mat3identity = [9]float32{1, 0, 0, 0, 1, 0, 0, 0, 1}
| New | identifier_name |
gogl.go | package goglbackend
import (
"fmt"
"github.com/tfriedel6/canvas/backend/backendbase"
"github.com/tfriedel6/canvas/backend/goglbackend/gl"
)
const alphaTexSize = 2048
var zeroes [alphaTexSize]byte
// GLContext is a context that contains all the
// shaders and buffers necessary for rendering
type GLContext struct {
buf uint32
shadowBuf uint32
alphaTex uint32
shd unifiedShader
offscr1 offscreenBuffer
offscr2 offscreenBuffer
imageBufTex uint32
imageBuf []byte
ptsBuf []float32
}
// NewGLContext creates all the necessary GL resources,
// like shaders and buffers
func NewGLContext() (*GLContext, error) {
ctx := &GLContext{
ptsBuf: make([]float32, 0, 4096),
}
err := gl.Init()
if err != nil {
return nil, err
}
gl.GetError() // clear error state
err = loadShader(unifiedVS, unifiedFS, &ctx.shd.shaderProgram)
if err != nil {
return nil, err
}
ctx.shd.shaderProgram.mustLoadLocations(&ctx.shd)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.buf)
if err = glError(); err != nil {
return nil, err
}
gl.GenBuffers(1, &ctx.shadowBuf)
if err = glError(); err != nil {
return nil, err
}
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &ctx.alphaTex)
gl.BindTexture(gl.TEXTURE_2D, ctx.alphaTex)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.ALPHA, alphaTexSize, alphaTexSize, 0, gl.ALPHA, gl.UNSIGNED_BYTE, nil)
// todo should use gl.RED on OpenGL, gl.ALPHA on OpenGL ES
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.Enable(gl.STENCIL_TEST)
gl.StencilMask(0xFF)
gl.Clear(gl.STENCIL_BUFFER_BIT)
gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
gl.StencilFunc(gl.EQUAL, 0, 0xFF)
gl.Disable(gl.SCISSOR_TEST)
return ctx, nil
}
// GoGLBackend is a canvas backend using Go-GL
type GoGLBackend struct {
x, y, w, h int
fx, fy, fw, fh float64
*GLContext
activateFn func()
disableTextureRenderTarget func()
}
type offscreenBuffer struct {
tex uint32
w int
h int
renderStencilBuf uint32
frameBuf uint32
alpha bool
}
// New returns a new canvas backend. x, y, w, h define the target
// rectangle in the window. ctx is a GLContext created with
// NewGLContext, but can be nil for a default one. It makes sense
// to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func New(x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {
if ctx == nil {
var err error
ctx, err = NewGLContext()
if err != nil {
return nil, err
}
}
b := &GoGLBackend{
w: w,
h: h,
fw: float64(w),
fh: float64(h),
GLContext: ctx,
}
b.activateFn = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
// todo reapply clipping since another application may have used the stencil buffer
}
b.disableTextureRenderTarget = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
}
return b, nil
}
// GoGLBackendOffscreen is a canvas backend using an offscreen
// texture
type GoGLBackendOffscreen struct {
GoGLBackend
TextureID uint32
offscrBuf offscreenBuffer
offscrImg Image
}
// NewOffscreen returns a new offscreen canvas backend. w, h define
// the size of the offscreen texture. ctx is a GLContext created
// with NewGLContext, but can be nil for a default one. It makes
// sense to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func NewOffscreen(w, h int, alpha bool, ctx *GLContext) (*GoGLBackendOffscreen, error) {
b, err := New(0, 0, w, h, ctx)
if err != nil {
return nil, err
}
bo := &GoGLBackendOffscreen{GoGLBackend: *b}
bo.offscrBuf.alpha = alpha
bo.offscrImg.flip = true
bo.activateFn = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
gl.Viewport(0, 0, int32(bo.w), int32(bo.h))
bo.offscrImg.w = bo.offscrBuf.w
bo.offscrImg.h = bo.offscrBuf.h
bo.offscrImg.tex = bo.offscrBuf.tex
bo.TextureID = bo.offscrBuf.tex
}
bo.disableTextureRenderTarget = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
}
return bo, nil
}
// SetBounds updates the bounds of the canvas. This would
// usually be called for example when the window is resized
func (b *GoGLBackend) SetBounds(x, y, w, h int) {
b.x, b.y = x, y
b.fx, b.fy = float64(x), float64(y)
b.w, b.h = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
}
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if ip := style.ImagePattern; ip != nil {
ipd := ip.(*ImagePattern).data
img := ipd.Image.(*Image)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, img.tex)
gl.Uniform2f(b.shd.ImageSize, float32(img.w), float32(img.h))
gl.Uniform1i(b.shd.Image, 0)
var f32mat [9]float32
for i, v := range ipd.Transform {
f32mat[i] = float32(v)
}
gl.UniformMatrix3fv(b.shd.ImageTransform, 1, false, &f32mat[0])
switch ipd.Repeat {
case backendbase.Repeat:
gl.Uniform2f(b.shd.Repeat, 1, 1)
case backendbase.RepeatX:
gl.Uniform2f(b.shd.Repeat, 1, 0)
case backendbase.RepeatY:
gl.Uniform2f(b.shd.Repeat, 0, 1)
case backendbase.NoRepeat:
gl.Uniform2f(b.shd.Repeat, 0, 0)
}
gl.Uniform1i(b.shd.Func, shdFuncImagePattern)
return b.shd.Vertex, b.shd.TexCoord
}
cr := float32(style.Color.R) / 255
cg := float32(style.Color.G) / 255
cb := float32(style.Color.B) / 255
ca := float32(style.Color.A) / 255
gl.Uniform4f(b.shd.Color, cr, cg, cb, ca)
gl.Uniform1f(b.shd.GlobalAlpha, 1)
gl.Uniform1i(b.shd.Func, shdFuncSolid)
return b.shd.Vertex, b.shd.TexCoord
}
func (b *GoGLBackend) enableTextureRenderTarget(offscr *offscreenBuffer) {
if offscr.w == b.w && offscr.h == b.h {
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
return
}
if b.w == 0 || b.h == 0 {
return
}
if offscr.w != 0 && offscr.h != 0 {
gl.DeleteTextures(1, &offscr.tex)
gl.DeleteFramebuffers(1, &offscr.frameBuf)
gl.DeleteRenderbuffers(1, &offscr.renderStencilBuf)
}
offscr.w = b.w
offscr.h = b.h
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &offscr.tex)
gl.BindTexture(gl.TEXTURE_2D, offscr.tex)
// todo do non-power-of-two textures work everywhere?
if offscr.alpha {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, int32(b.w), int32(b.h), 0, gl.RGBA, gl.UNSIGNED_BYTE, nil)
} else {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, int32(b.w), int32(b.h), 0, gl.RGB, gl.UNSIGNED_BYTE, nil)
}
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.GenFramebuffers(1, &offscr.frameBuf)
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
gl.GenRenderbuffers(1, &offscr.renderStencilBuf)
gl.BindRenderbuffer(gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.RenderbufferStorage(gl.RENDERBUFFER, gl.STENCIL_INDEX8, int32(b.w), int32(b.h))
gl.FramebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, offscr.tex, 0) |
if err := gl.CheckFramebufferStatus(gl.FRAMEBUFFER); err != gl.FRAMEBUFFER_COMPLETE {
// todo this should maybe not panic
panic(fmt.Sprintf("Failed to set up framebuffer for offscreen texture: %x", err))
}
gl.Clear(gl.COLOR_BUFFER_BIT | gl.STENCIL_BUFFER_BIT)
}
func mat3(m backendbase.Mat) (m3 [9]float32) {
m3[0] = float32(m[0])
m3[1] = float32(m[1])
m3[2] = 0
m3[3] = float32(m[2])
m3[4] = float32(m[3])
m3[5] = 0
m3[6] = float32(m[4])
m3[7] = float32(m[5])
m3[8] = 1
return
}
var mat3identity = [9]float32{1, 0, 0, 0, 1, 0, 0, 0, 1} | random_line_split | |
main-ipc.ts | import { BrowserWindow } from 'electron';
const dialog = require('electron').dialog;
const shell = require('electron').shell;
import * as path from 'path';
const fs = require('fs');
const trash = require('trash');
const exec = require('child_process').exec;
import { GLOBALS } from './main-globals';
import { ImageElement, FinalObject, InputSources } from '../interfaces/final-object.interface';
import { SettingsObject } from '../interfaces/settings-object.interface';
import { createDotPlsFile, writeVhaFileToDisk } from './main-support';
import { replaceThumbnailWithNewImage } from './main-extract';
import { closeWatcher, startWatcher, extractAnyMissingThumbs, removeThumbnailsNotInHub } from './main-extract-async';
/**
* Set up the listeners
* @param ipc
* @param win
* @param pathToAppData
* @param systemMessages
*/
export function setUpIpcMessages(ipc, win, pathToAppData, systemMessages) {
/**
* Un-Maximize the window
*/
ipc.on('un-maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().unmaximize();
}
});
/**
* Minimize the window
*/
ipc.on('minimize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().minimize();
}
});
/**
* Open the explorer to the relevant file
*/
ipc.on('open-in-explorer', (event, fullPath: string) => {
shell.showItemInFolder(fullPath);
});
/**
* Open a URL in system's default browser
*/
ipc.on('please-open-url', (event, urlToOpen: string): void => {
shell.openExternal(urlToOpen, { activate: true });
});
/**
* Maximize the window
*/
ipc.on('maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().maximize();
}
});
/**
* Open a particular video file clicked inside Angular
*/
ipc.on('open-media-file', (event, fullFilePath) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
shell.openItem(path.normalize(fullFilePath)); // normalize because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function notifyFileDeleted(event, fileToDelete, item) |
/**
* Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
/**
* Try to rename the particular file
*/
ipc.on('try-to-rename-this-file', (event, sourceFolder: string, relPath: string, file: string, renameTo: string, index: number): void => {
console.log('renaming file:');
const original: string = path.join(sourceFolder, relPath, file);
const newName: string = path.join(sourceFolder, relPath, renameTo);
console.log(original);
console.log(newName);
let success = true;
let errMsg: string;
// check if already exists first
if (fs.existsSync(newName)) {
console.log('some file already EXISTS WITH THAT NAME !!!');
success = false;
errMsg = 'RIGHTCLICK.errorFileNameExists';
} else {
try {
fs.renameSync(original, newName);
} catch (err) {
success = false;
console.log(err);
if (err.code === 'ENOENT') {
// const pathObj = path.parse(err.path);
// console.log(pathObj);
errMsg = 'RIGHTCLICK.errorFileNotFound';
} else {
errMsg = 'RIGHTCLICK.errorSomeError';
}
}
}
event.sender.send('rename-file-response', index, success, renameTo, file, errMsg);
});
/**
* Close the window / quit / exit the app
*/
ipc.on('close-window', (event, settingsToSave: SettingsObject, finalObjectToSave: FinalObject) => {
// convert shortcuts map to object
// someday when node stops giving error: Property 'fromEntries' does not exist on type 'ObjectConstructor'
// settingsToSave.shortcuts = <any>Object.fromEntries(settingsToSave.shortcuts);
// until then: https://gist.github.com/lukehorvat/133e2293ba6ae96a35ba#gistcomment-2600839
let obj = Array.from(settingsToSave.shortcuts).reduce((obj, [key, value]) => {
obj[key] = value;
return obj;
}, {});
settingsToSave.shortcuts = <any>obj;
const json = JSON.stringify(settingsToSave);
try {
fs.statSync(path.join(pathToAppData, 'video-hub-app-2'));
} catch (e) {
fs.mkdirSync(path.join(pathToAppData, 'video-hub-app-2'));
}
// TODO -- catch bug if user closes before selecting the output folder ?!??
fs.writeFile(path.join(GLOBALS.settingsPath, 'settings.json'), json, 'utf8', () => {
if (finalObjectToSave !== null) {
writeVhaFileToDisk(finalObjectToSave, GLOBALS.currentlyOpenVhaFile, () => {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
});
} else {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
}
});
});
}
| {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item);
}
});
} | identifier_body |
main-ipc.ts | import { BrowserWindow } from 'electron';
const dialog = require('electron').dialog;
const shell = require('electron').shell;
import * as path from 'path';
const fs = require('fs');
const trash = require('trash');
const exec = require('child_process').exec;
import { GLOBALS } from './main-globals';
import { ImageElement, FinalObject, InputSources } from '../interfaces/final-object.interface';
import { SettingsObject } from '../interfaces/settings-object.interface';
import { createDotPlsFile, writeVhaFileToDisk } from './main-support';
import { replaceThumbnailWithNewImage } from './main-extract';
import { closeWatcher, startWatcher, extractAnyMissingThumbs, removeThumbnailsNotInHub } from './main-extract-async';
/**
* Set up the listeners
* @param ipc
* @param win
* @param pathToAppData
* @param systemMessages
*/
export function setUpIpcMessages(ipc, win, pathToAppData, systemMessages) {
/**
* Un-Maximize the window
*/
ipc.on('un-maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().unmaximize();
}
});
/**
* Minimize the window
*/
ipc.on('minimize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().minimize();
}
});
/**
* Open the explorer to the relevant file
*/
ipc.on('open-in-explorer', (event, fullPath: string) => {
shell.showItemInFolder(fullPath);
});
/**
* Open a URL in system's default browser
*/
ipc.on('please-open-url', (event, urlToOpen: string): void => {
shell.openExternal(urlToOpen, { activate: true });
});
/**
* Maximize the window
*/
ipc.on('maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().maximize();
}
});
/**
* Open a particular video file clicked inside Angular
*/
ipc.on('open-media-file', (event, fullFilePath) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
shell.openItem(path.normalize(fullFilePath)); // normalize because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function notifyFileDeleted(event, fileToDelete, item) {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item); | * Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
/**
* Try to rename the particular file
*/
ipc.on('try-to-rename-this-file', (event, sourceFolder: string, relPath: string, file: string, renameTo: string, index: number): void => {
console.log('renaming file:');
const original: string = path.join(sourceFolder, relPath, file);
const newName: string = path.join(sourceFolder, relPath, renameTo);
console.log(original);
console.log(newName);
let success = true;
let errMsg: string;
// check if already exists first
if (fs.existsSync(newName)) {
console.log('some file already EXISTS WITH THAT NAME !!!');
success = false;
errMsg = 'RIGHTCLICK.errorFileNameExists';
} else {
try {
fs.renameSync(original, newName);
} catch (err) {
success = false;
console.log(err);
if (err.code === 'ENOENT') {
// const pathObj = path.parse(err.path);
// console.log(pathObj);
errMsg = 'RIGHTCLICK.errorFileNotFound';
} else {
errMsg = 'RIGHTCLICK.errorSomeError';
}
}
}
event.sender.send('rename-file-response', index, success, renameTo, file, errMsg);
});
/**
* Close the window / quit / exit the app
*/
ipc.on('close-window', (event, settingsToSave: SettingsObject, finalObjectToSave: FinalObject) => {
// convert shortcuts map to object
// someday when node stops giving error: Property 'fromEntries' does not exist on type 'ObjectConstructor'
// settingsToSave.shortcuts = <any>Object.fromEntries(settingsToSave.shortcuts);
// until then: https://gist.github.com/lukehorvat/133e2293ba6ae96a35ba#gistcomment-2600839
let obj = Array.from(settingsToSave.shortcuts).reduce((obj, [key, value]) => {
obj[key] = value;
return obj;
}, {});
settingsToSave.shortcuts = <any>obj;
const json = JSON.stringify(settingsToSave);
try {
fs.statSync(path.join(pathToAppData, 'video-hub-app-2'));
} catch (e) {
fs.mkdirSync(path.join(pathToAppData, 'video-hub-app-2'));
}
// TODO -- catch bug if user closes before selecting the output folder ?!??
fs.writeFile(path.join(GLOBALS.settingsPath, 'settings.json'), json, 'utf8', () => {
if (finalObjectToSave !== null) {
writeVhaFileToDisk(finalObjectToSave, GLOBALS.currentlyOpenVhaFile, () => {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
});
} else {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
}
});
});
} | }
});
}
/** | random_line_split |
main-ipc.ts | import { BrowserWindow } from 'electron';
const dialog = require('electron').dialog;
const shell = require('electron').shell;
import * as path from 'path';
const fs = require('fs');
const trash = require('trash');
const exec = require('child_process').exec;
import { GLOBALS } from './main-globals';
import { ImageElement, FinalObject, InputSources } from '../interfaces/final-object.interface';
import { SettingsObject } from '../interfaces/settings-object.interface';
import { createDotPlsFile, writeVhaFileToDisk } from './main-support';
import { replaceThumbnailWithNewImage } from './main-extract';
import { closeWatcher, startWatcher, extractAnyMissingThumbs, removeThumbnailsNotInHub } from './main-extract-async';
/**
* Set up the listeners
* @param ipc
* @param win
* @param pathToAppData
* @param systemMessages
*/
export function setUpIpcMessages(ipc, win, pathToAppData, systemMessages) {
/**
* Un-Maximize the window
*/
ipc.on('un-maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().unmaximize();
}
});
/**
* Minimize the window
*/
ipc.on('minimize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().minimize();
}
});
/**
* Open the explorer to the relevant file
*/
ipc.on('open-in-explorer', (event, fullPath: string) => {
shell.showItemInFolder(fullPath);
});
/**
* Open a URL in system's default browser
*/
ipc.on('please-open-url', (event, urlToOpen: string): void => {
shell.openExternal(urlToOpen, { activate: true });
});
/**
* Maximize the window
*/
ipc.on('maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().maximize();
}
});
/**
* Open a particular video file clicked inside Angular
*/
ipc.on('open-media-file', (event, fullFilePath) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
shell.openItem(path.normalize(fullFilePath)); // normalize because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function | (event, fileToDelete, item) {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item);
}
});
}
/**
* Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
/**
* Try to rename the particular file
*/
ipc.on('try-to-rename-this-file', (event, sourceFolder: string, relPath: string, file: string, renameTo: string, index: number): void => {
console.log('renaming file:');
const original: string = path.join(sourceFolder, relPath, file);
const newName: string = path.join(sourceFolder, relPath, renameTo);
console.log(original);
console.log(newName);
let success = true;
let errMsg: string;
// check if already exists first
if (fs.existsSync(newName)) {
console.log('some file already EXISTS WITH THAT NAME !!!');
success = false;
errMsg = 'RIGHTCLICK.errorFileNameExists';
} else {
try {
fs.renameSync(original, newName);
} catch (err) {
success = false;
console.log(err);
if (err.code === 'ENOENT') {
// const pathObj = path.parse(err.path);
// console.log(pathObj);
errMsg = 'RIGHTCLICK.errorFileNotFound';
} else {
errMsg = 'RIGHTCLICK.errorSomeError';
}
}
}
event.sender.send('rename-file-response', index, success, renameTo, file, errMsg);
});
/**
* Close the window / quit / exit the app
*/
ipc.on('close-window', (event, settingsToSave: SettingsObject, finalObjectToSave: FinalObject) => {
// convert shortcuts map to object
// someday when node stops giving error: Property 'fromEntries' does not exist on type 'ObjectConstructor'
// settingsToSave.shortcuts = <any>Object.fromEntries(settingsToSave.shortcuts);
// until then: https://gist.github.com/lukehorvat/133e2293ba6ae96a35ba#gistcomment-2600839
let obj = Array.from(settingsToSave.shortcuts).reduce((obj, [key, value]) => {
obj[key] = value;
return obj;
}, {});
settingsToSave.shortcuts = <any>obj;
const json = JSON.stringify(settingsToSave);
try {
fs.statSync(path.join(pathToAppData, 'video-hub-app-2'));
} catch (e) {
fs.mkdirSync(path.join(pathToAppData, 'video-hub-app-2'));
}
// TODO -- catch bug if user closes before selecting the output folder ?!??
fs.writeFile(path.join(GLOBALS.settingsPath, 'settings.json'), json, 'utf8', () => {
if (finalObjectToSave !== null) {
writeVhaFileToDisk(finalObjectToSave, GLOBALS.currentlyOpenVhaFile, () => {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
});
} else {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
}
});
});
}
| notifyFileDeleted | identifier_name |
main-ipc.ts | import { BrowserWindow } from 'electron';
const dialog = require('electron').dialog;
const shell = require('electron').shell;
import * as path from 'path';
const fs = require('fs');
const trash = require('trash');
const exec = require('child_process').exec;
import { GLOBALS } from './main-globals';
import { ImageElement, FinalObject, InputSources } from '../interfaces/final-object.interface';
import { SettingsObject } from '../interfaces/settings-object.interface';
import { createDotPlsFile, writeVhaFileToDisk } from './main-support';
import { replaceThumbnailWithNewImage } from './main-extract';
import { closeWatcher, startWatcher, extractAnyMissingThumbs, removeThumbnailsNotInHub } from './main-extract-async';
/**
* Set up the listeners
* @param ipc
* @param win
* @param pathToAppData
* @param systemMessages
*/
export function setUpIpcMessages(ipc, win, pathToAppData, systemMessages) {
/**
* Un-Maximize the window
*/
ipc.on('un-maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().unmaximize();
}
});
/**
* Minimize the window
*/
ipc.on('minimize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().minimize();
}
});
/**
* Open the explorer to the relevant file
*/
ipc.on('open-in-explorer', (event, fullPath: string) => {
shell.showItemInFolder(fullPath);
});
/**
* Open a URL in system's default browser
*/
ipc.on('please-open-url', (event, urlToOpen: string): void => {
shell.openExternal(urlToOpen, { activate: true });
});
/**
* Maximize the window
*/
ipc.on('maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().maximize();
}
});
/**
* Open a particular video file clicked inside Angular
*/
ipc.on('open-media-file', (event, fullFilePath) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
shell.openItem(path.normalize(fullFilePath)); // normalize because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function notifyFileDeleted(event, fileToDelete, item) {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item);
}
});
}
/**
* Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
/**
* Try to rename the particular file
*/
ipc.on('try-to-rename-this-file', (event, sourceFolder: string, relPath: string, file: string, renameTo: string, index: number): void => {
console.log('renaming file:');
const original: string = path.join(sourceFolder, relPath, file);
const newName: string = path.join(sourceFolder, relPath, renameTo);
console.log(original);
console.log(newName);
let success = true;
let errMsg: string;
// check if already exists first
if (fs.existsSync(newName)) {
console.log('some file already EXISTS WITH THAT NAME !!!');
success = false;
errMsg = 'RIGHTCLICK.errorFileNameExists';
} else {
try {
fs.renameSync(original, newName);
} catch (err) {
success = false;
console.log(err);
if (err.code === 'ENOENT') {
// const pathObj = path.parse(err.path);
// console.log(pathObj);
errMsg = 'RIGHTCLICK.errorFileNotFound';
} else |
}
}
event.sender.send('rename-file-response', index, success, renameTo, file, errMsg);
});
/**
* Close the window / quit / exit the app
*/
ipc.on('close-window', (event, settingsToSave: SettingsObject, finalObjectToSave: FinalObject) => {
// convert shortcuts map to object
// someday when node stops giving error: Property 'fromEntries' does not exist on type 'ObjectConstructor'
// settingsToSave.shortcuts = <any>Object.fromEntries(settingsToSave.shortcuts);
// until then: https://gist.github.com/lukehorvat/133e2293ba6ae96a35ba#gistcomment-2600839
let obj = Array.from(settingsToSave.shortcuts).reduce((obj, [key, value]) => {
obj[key] = value;
return obj;
}, {});
settingsToSave.shortcuts = <any>obj;
const json = JSON.stringify(settingsToSave);
try {
fs.statSync(path.join(pathToAppData, 'video-hub-app-2'));
} catch (e) {
fs.mkdirSync(path.join(pathToAppData, 'video-hub-app-2'));
}
// TODO -- catch bug if user closes before selecting the output folder ?!??
fs.writeFile(path.join(GLOBALS.settingsPath, 'settings.json'), json, 'utf8', () => {
if (finalObjectToSave !== null) {
writeVhaFileToDisk(finalObjectToSave, GLOBALS.currentlyOpenVhaFile, () => {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
});
} else {
try {
BrowserWindow.getFocusedWindow().close();
} catch {}
}
});
});
}
| {
errMsg = 'RIGHTCLICK.errorSomeError';
} | conditional_block |
helpers.go | /**
Copyright (c) 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ocm
import (
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"os"
"regexp"
"strings"
awssdk "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/ec2"
semver "github.com/hashicorp/go-version"
"github.com/openshift/rosa/pkg/aws"
"github.com/openshift/rosa/pkg/output"
"github.com/openshift/rosa/pkg/reporter"
errors "github.com/zgalor/weberr"
amsv1 "github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1"
cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
ocmerrors "github.com/openshift-online/ocm-sdk-go/errors"
"github.com/openshift/rosa/pkg/helper"
)
const (
ANY = "any"
HibernateCapability = "capability.organization.hibernate_cluster"
//Pendo Events
Success = "Success"
Failure = "Failure"
Response = "Response"
ClusterID = "ClusterID"
OperatorRolesPrefix = "OperatorRolePrefix"
Version = "Version"
Username = "Username"
URL = "URL"
IsThrottle = "IsThrottle"
OCMRoleLabel = "sts_ocm_role"
USERRoleLabel = "sts_user_role"
maxClusterNameLength = 15
)
// Regular expression used to make sure that the identifier or name given by the user is
// safe and that there is no risk of SQL injection:
var clusterKeyRE = regexp.MustCompile(`^(\w|-)+$`)
// Cluster names must be valid DNS-1035 labels, so they must consist of lower case alphanumeric
// characters or '-', start with an alphabetic character, and end with an alphanumeric character
var clusterNameRE = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,13}[a-z0-9])?$`)
var badUsernameRE = regexp.MustCompile(`^(~|\.?\.|.*[:\/%].*)$`)
// IsValidClusterKey reports whether clusterKey matches the safe-identifier
// pattern (word characters and '-' only), guarding against SQL injection.
func IsValidClusterKey(clusterKey string) bool {
    return clusterKeyRE.MatchString(clusterKey)
}
// IsValidClusterName reports whether clusterName is a valid DNS-1035 label
// of at most 15 characters (per clusterNameRE).
func IsValidClusterName(clusterName string) bool {
    return clusterNameRE.MatchString(clusterName)
}
// ClusterNameValidator validates that name is a string containing a valid
// cluster name (trimmed of spaces/tabs, then matched as a DNS-1035 label of
// at most 15 characters). Returns nil on success, a descriptive error
// otherwise; non-string inputs are rejected outright.
func ClusterNameValidator(name interface{}) error {
    str, ok := name.(string)
    if !ok {
        return fmt.Errorf("can only validate strings, got %v", name)
    }
    trimmed := strings.Trim(str, " \t")
    if IsValidClusterName(trimmed) {
        return nil
    }
    return fmt.Errorf("Cluster name must consist of no more than 15 lowercase " +
        "alphanumeric characters or '-', start with a letter, and end with an " +
        "alphanumeric character.")
}
// ValidateHTTPProxy checks that val is a string holding either an empty
// value (allowed) or a parseable URI whose scheme is exactly "http".
func ValidateHTTPProxy(val interface{}) error {
    httpProxy, ok := val.(string)
    if !ok {
        return fmt.Errorf("can only validate strings, got %v", val)
    }
    if httpProxy == "" {
        return nil
    }
    parsed, err := url.ParseRequestURI(httpProxy)
    if err != nil {
        return fmt.Errorf("Invalid http-proxy value '%s'", httpProxy)
    }
    if parsed.Scheme != "http" {
        return errors.Errorf("%s", "Expected http-proxy to have an http:// scheme")
    }
    return nil
}
// ValidateAdditionalTrustBundle checks that val is a string naming a file
// that is readable, non-empty, and contains at least one PEM-encoded
// certificate. An empty file name is accepted (no bundle configured).
func ValidateAdditionalTrustBundle(val interface{}) error {
    additionalTrustBundleFile, ok := val.(string)
    if !ok {
        return fmt.Errorf("can only validate strings, got %v", val)
    }
    if additionalTrustBundleFile == "" {
        return nil
    }
    cert, err := os.ReadFile(additionalTrustBundleFile)
    if err != nil {
        return err
    }
    if len(cert) == 0 {
        return errors.Errorf("%s", "Trust bundle file is empty")
    }
    // AppendCertsFromPEM reports whether any certificate was parsed.
    if !x509.NewCertPool().AppendCertsFromPEM(cert) {
        return errors.Errorf("%s", "Failed to parse additional trust bundle")
    }
    return nil
}
// IsValidUsername reports whether username avoids the disallowed patterns
// ("~", "." / "..", or any ':', '/' or '%' characters) in badUsernameRE.
func IsValidUsername(username string) bool {
    return !badUsernameRE.MatchString(username)
}
// IsEmptyCIDR reports whether cidr is the zero-value net.IPNet, which
// stringifies to "<nil>".
func IsEmptyCIDR(cidr net.IPNet) bool {
    return cidr.String() == "<nil>"
}
// Determine whether a resources is compatible with ROSA clusters in general:
// product must be "any"/"rosa"/"moa", cloud provider "any"/"aws", and
// BYOC "any"/"byoc" (all compared case-insensitively).
func isCompatible(relatedResource *amsv1.RelatedResource) bool {
    product := strings.ToLower(relatedResource.Product())
    cloudProvider := strings.ToLower(relatedResource.CloudProvider())
    byoc := strings.ToLower(relatedResource.BYOC())
    // nolint:goconst
    return (product == ANY || product == "rosa" || product == "moa") &&
        (cloudProvider == ANY || cloudProvider == "aws") &&
        (byoc == ANY || byoc == "byoc")
}
// handleErr converts an OCM API error response into a weberr error whose
// error type is derived from the response's HTTP status. The response
// reason is used as the message, falling back to err's message when empty.
func handleErr(res *ocmerrors.Error, err error) error {
    msg := res.Reason()
    if msg == "" {
        msg = err.Error()
    }
    // Hack to always display the correct terms and conditions message
    if res.Code() == "CLUSTERS-MGMT-451" {
        msg = "You must accept the Terms and Conditions in order to continue.\n" +
            "Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" +
            "Once you accept the terms, you will need to retry the action that was blocked."
    }
    errType := errors.ErrorType(res.Status())
    return errType.Set(errors.Errorf("%s", msg))
}
// GetDefaultClusterFlavors fetches the default network CIDRs, host prefix
// and compute instance type from the named OCM flavour, falling back to the
// "osd-4" flavour when the requested one cannot be retrieved.
// CIDRs that fail to parse are returned as nil rather than as errors.
func (c *Client) GetDefaultClusterFlavors(flavour string) (dMachinecidr *net.IPNet, dPodcidr *net.IPNet,
    dServicecidr *net.IPNet, dhostPrefix int, computeInstanceType string) {
    flavourGetResponse, err := c.ocm.ClustersMgmt().V1().Flavours().Flavour(flavour).Get().Send()
    if err != nil {
        // Fallback lookup; its error is deliberately ignored, so the
        // accessors below may operate on an empty response body.
        flavourGetResponse, _ = c.ocm.ClustersMgmt().V1().Flavours().Flavour("osd-4").Get().Send()
    }
    aws, ok := flavourGetResponse.Body().GetAWS()
    if !ok {
        return nil, nil, nil, 0, ""
    }
    computeInstanceType = aws.ComputeInstanceType()
    network, ok := flavourGetResponse.Body().GetNetwork()
    if !ok {
        return nil, nil, nil, 0, computeInstanceType
    }
    // net.ParseCIDR returns (ip, ipnet, err); only the network part is kept.
    _, dMachinecidr, err = net.ParseCIDR(network.MachineCIDR())
    if err != nil {
        dMachinecidr = nil
    }
    _, dPodcidr, err = net.ParseCIDR(network.PodCIDR())
    if err != nil {
        dPodcidr = nil
    }
    _, dServicecidr, err = net.ParseCIDR(network.ServiceCIDR())
    if err != nil {
        dServicecidr = nil
    }
    dhostPrefix, _ = network.GetHostPrefix()
    return dMachinecidr, dPodcidr, dServicecidr, dhostPrefix, computeInstanceType
}
// LogEvent sends a telemetry event with the given key and body to OCM.
// Best-effort: build and send failures are silently ignored.
func (c *Client) LogEvent(key string, body map[string]string) {
    event, err := cmv1.NewEvent().Key(key).Body(body).Build()
    if err == nil {
        _, _ = c.ocm.ClustersMgmt().V1().
            Events().
            Add().
            Body(event).
            Send()
    }
}
// GetCurrentAccount returns the account associated with the current OCM
// session. A 404 is mapped to (nil, nil), i.e. "no account" rather than an
// error; other failures are wrapped via handleErr.
func (c *Client) GetCurrentAccount() (*amsv1.Account, error) {
    response, err := c.ocm.AccountsMgmt().V1().
        CurrentAccount().
        Get().
        Send()
    if err != nil {
        if response.Status() == http.StatusNotFound {
            return nil, nil
        }
        return nil, handleErr(response.Error(), err)
    }
    return response.Body(), nil
}
func (c *Client) GetCurrentOrganization() (id string, externalID string, err error) {
acctResponse, err := c.GetCurrentAccount()
if err != nil {
return
}
id = acctResponse.Organization().ID() | func (c *Client) IsCapabilityEnabled(capability string) (enabled bool, err error) {
organizationID, _, err := c.GetCurrentOrganization()
if err != nil {
return
}
isCapabilityEnable, err := c.isCapabilityEnabled(capability, organizationID)
if err != nil {
return
}
if !isCapabilityEnable {
return false, nil
}
return true, nil
}
// isCapabilityEnabled fetches the organization's capabilities and reports
// whether the named capability is present with the value "true".
func (c *Client) isCapabilityEnabled(capabilityName string, orgID string) (bool, error) {
    capabilityResponse, err := c.ocm.AccountsMgmt().V1().Organizations().
        Organization(orgID).Get().Parameter("fetchCapabilities", true).Send()
    if err != nil {
        return false, handleErr(capabilityResponse.Error(), err)
    }
    // Ranging over an empty slice is a no-op, so no explicit length check
    // is needed (the original guarded with len(...) > 0 redundantly).
    for _, capability := range capabilityResponse.Body().Capabilities() {
        if capability.Name() == capabilityName {
            return capability.Value() == "true", nil
        }
    }
    return false, nil
}
// UnlinkUserRoleFromAccount removes roleARN from the account's comma-separated
// sts_user_role label. If other roles remain the label is updated; when the
// last role is removed the label itself is deleted. Returns a user error if
// roleARN is not currently linked to the account.
func (c *Client) UnlinkUserRoleFromAccount(accountID string, roleARN string) error {
    linkedRoles, err := c.GetAccountLinkedUserRoles(accountID)
    if err != nil {
        return err
    }
    if helper.Contains(linkedRoles, roleARN) {
        linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
        if len(linkedRoles) > 0 {
            // Rebuild the comma-separated label value without roleARN.
            newRoleARN := strings.Join(linkedRoles, ",")
            label, err := amsv1.NewLabel().Key(USERRoleLabel).Value(newRoleARN).Build()
            if err != nil {
                return err
            }
            resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
                Labels(USERRoleLabel).Update().Body(label).Send()
            if err != nil {
                return handleErr(resp.Error(), err)
            }
        } else {
            // No roles left: delete the label entirely.
            resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
                Labels(USERRoleLabel).Delete().Send()
            if err != nil {
                return handleErr(resp.Error(), err)
            }
        }
        return nil
    }
    return errors.UserErrorf("Role ARN '%s' is not linked with the current account '%s'", roleARN, accountID)
}
// LinkAccountRole appends roleARN to the account's comma-separated
// sts_user_role label, creating the label if absent. Linking an ARN that is
// already present is a no-op.
func (c *Client) LinkAccountRole(accountID string, roleARN string) error {
    resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
        // Use the shared USERRoleLabel constant instead of a string literal,
        // consistent with the other sts_user_role accessors in this file.
        Labels().Labels(USERRoleLabel).Get().Send()
    // A 404 just means no label exists yet.
    if err != nil && resp.Status() != 404 {
        if resp.Status() == 403 {
            return errors.Forbidden.UserErrorf("%v", err)
        }
        return handleErr(resp.Error(), err)
    }
    existingARN := resp.Body().Value()
    exists := false
    if existingARN != "" {
        for _, value := range strings.Split(existingARN, ",") {
            if value == roleARN {
                exists = true
                break
            }
        }
    }
    if exists {
        // Already linked; nothing to do.
        return nil
    }
    if existingARN != "" {
        roleARN = existingARN + "," + roleARN
    }
    labelBuilder, err := amsv1.NewLabel().Key(USERRoleLabel).Value(roleARN).Build()
    if err != nil {
        return err
    }
    addResp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
        Labels().Add().Body(labelBuilder).Send()
    if err != nil {
        // Bug fix: wrap the Add response's error, not the earlier Get
        // response's error as the original did.
        return handleErr(addResp.Error(), err)
    }
    return nil
}
// UnlinkOCMRoleFromOrg removes roleARN from the organization's comma-separated
// sts_ocm_role label. If other roles remain the label is updated; when the
// last role is removed the label itself is deleted. Returns a user error if
// roleARN is not currently linked to the organization.
func (c *Client) UnlinkOCMRoleFromOrg(orgID string, roleARN string) error {
    linkedRoles, err := c.GetOrganizationLinkedOCMRoles(orgID)
    if err != nil {
        return err
    }
    if helper.Contains(linkedRoles, roleARN) {
        linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
        if len(linkedRoles) > 0 {
            // Rebuild the comma-separated label value without roleARN.
            newRoleARN := strings.Join(linkedRoles, ",")
            label, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(newRoleARN).Build()
            if err != nil {
                return err
            }
            resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
                Labels(OCMRoleLabel).Update().Body(label).Send()
            if err != nil {
                return handleErr(resp.Error(), err)
            }
        } else {
            // No roles left: delete the label entirely.
            resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
                Labels(OCMRoleLabel).Delete().Send()
            if err != nil {
                return handleErr(resp.Error(), err)
            }
        }
        return nil
    }
    return errors.UserErrorf("Role-arn '%s' is not linked with the organization account '%s'", roleARN, orgID)
}
// LinkOrgToRole appends roleARN to the organization's sts_ocm_role label.
// Only one role per AWS account per organization is allowed: if the AWS
// account already has a different ARN linked, a user error is returned;
// if the same ARN is already linked, (false, nil) is returned.
// Returns true when a new link was actually created.
func (c *Client) LinkOrgToRole(orgID string, roleARN string) (bool, error) {
    parsedARN, err := arn.Parse(roleARN)
    if err != nil {
        return false, err
    }
    exists, existingARN, selectedARN, err := c.CheckIfAWSAccountExists(orgID, parsedARN.AccountID)
    if err != nil {
        return false, err
    }
    if exists {
        if selectedARN != roleARN {
            return false, errors.UserErrorf("User organization '%s' has role-arn '%s' associated. "+
                "Only one role can be linked per AWS account per organization", orgID, selectedARN)
        }
        // Same ARN already linked; nothing to do.
        return false, nil
    }
    if existingARN != "" {
        // Preserve links for other AWS accounts in the comma-separated value.
        roleARN = existingARN + "," + roleARN
    }
    labelBuilder, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(roleARN).Build()
    if err != nil {
        return false, err
    }
    resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
        Labels().Add().Body(labelBuilder).Send()
    if err != nil {
        return false, handleErr(resp.Error(), err)
    }
    return true, nil
}
// GetAccountLinkedUserRoles returns the user-role ARNs linked to the given
// account via its sts_user_role label. A missing label (404) or empty value
// yields an empty slice.
func (c *Client) GetAccountLinkedUserRoles(accountID string) ([]string, error) {
    resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
        Labels().Labels(USERRoleLabel).Get().Send()
    if err != nil && resp.Status() != http.StatusNotFound {
        return nil, handleErr(resp.Error(), err)
    }
    value := resp.Body().Value()
    if value == "" {
        // Bug fix: strings.Split("", ",") returns [""], a one-element slice
        // containing an empty string; report "no linked roles" instead.
        return []string{}, nil
    }
    return strings.Split(value, ","), nil
}
// GetOrganizationLinkedOCMRoles returns the OCM-role ARNs linked to the given
// organization via its sts_ocm_role label. A missing label (404) or empty
// value yields an empty slice.
func (c *Client) GetOrganizationLinkedOCMRoles(orgID string) ([]string, error) {
    resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
        Labels().Labels(OCMRoleLabel).Get().Send()
    if err != nil && resp.Status() != http.StatusNotFound {
        // Consistency fix: wrap via handleErr like the sibling
        // GetAccountLinkedUserRoles instead of returning the raw error.
        return nil, handleErr(resp.Error(), err)
    }
    value := resp.Body().Value()
    if value == "" {
        // Bug fix: strings.Split("", ",") returns [""]; report "no roles".
        return []string{}, nil
    }
    return strings.Split(value, ","), nil
}
// CheckIfAWSAccountExists looks up the organization's sts_ocm_role label and
// reports whether any of the comma-separated role ARNs stored there belongs
// to the given AWS account.
// Returns (found, full label value, the matching ARN, error).
func (c *Client) CheckIfAWSAccountExists(orgID string, awsAccountID string) (bool, string, string, error) {
    resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
        Labels().Labels(OCMRoleLabel).Get().Send()
    // A 404 simply means no label is linked yet; treat it as "not found".
    if err != nil && resp.Status() != 404 {
        if resp.Status() == 403 {
            return false, "", "", errors.Forbidden.UserErrorf("%v", err)
        }
        return false, "", "", handleErr(resp.Error(), err)
    }
    existingARN := resp.Body().Value()
    exists := false
    selectedARN := ""
    if existingARN != "" {
        // The label stores multiple role ARNs as a comma-separated list.
        existingARNArr := strings.Split(existingARN, ",")
        if len(existingARNArr) > 0 {
            for _, value := range existingARNArr {
                parsedARN, err := arn.Parse(value)
                if err != nil {
                    return false, "", "", err
                }
                if parsedARN.AccountID == awsAccountID {
                    exists = true
                    selectedARN = value
                    break
                }
            }
        }
    }
    return exists, existingARN, selectedARN, nil
}
/*
We should allow only one role per AWS account per organization.
If the user requests the same OCM role, we let them proceed so they can add
the admin role if it does not exist, attach policies, link, etc.
If the user requests a different OCM role name, we error out.
*/
// CheckRoleExists reports whether the organization already has an OCM role
// with a *different* name linked for the given AWS account.
// Returns (conflict, existing role name, existing ARN, error); requesting
// the same role name that is already linked is not a conflict.
func (c *Client) CheckRoleExists(orgID string, roleName string, awsAccountID string) (bool, string, string, error) {
    exists, _, selectedARN, err := c.CheckIfAWSAccountExists(orgID, awsAccountID)
    if err != nil {
        return false, "", "", err
    }
    if !exists {
        return false, "", "", nil
    }
    // An IAM role ARN looks like arn:aws:iam::<account>:role/<name>; the
    // part after the first '/' is the role name.
    existingRole := strings.SplitN(selectedARN, "/", 2)
    if len(existingRole) < 2 {
        // Bug fix: the original indexed existingRole[1] unconditionally in
        // the final return, panicking on a malformed ARN without a '/'.
        return false, "", "", nil
    }
    if existingRole[1] == roleName {
        return false, "", "", nil
    }
    return true, existingRole[1], selectedARN, nil
}
// GetVersionMinor extracts the "major.minor" portion of a version string,
// stripping any "openshift-v" prefix first. Semver parsing is attempted
// before falling back to a plain split on '.'.
func GetVersionMinor(ver string) string {
    rawID := strings.Replace(ver, "openshift-v", "", 1)
    version, err := semver.NewVersion(rawID)
    if err != nil {
        segments := strings.Split(rawID, ".")
        if len(segments) < 2 {
            // Bug fix: the original indexed segments[1] unconditionally and
            // would panic on an unparseable version with no '.' in it.
            return rawID
        }
        return fmt.Sprintf("%s.%s", segments[0], segments[1])
    }
    segments := version.Segments()
    return fmt.Sprintf("%d.%d", segments[0], segments[1])
}
// CheckSupportedVersion reports whether clusterVersion is greater than or
// equal to operatorVersion, comparing both as semantic versions. An error
// is returned when either string fails to parse.
func CheckSupportedVersion(clusterVersion string, operatorVersion string) (bool, error) {
    clusterSemver, err := semver.NewVersion(clusterVersion)
    if err != nil {
        return false, err
    }
    operatorSemver, err := semver.NewVersion(operatorVersion)
    if err != nil {
        return false, err
    }
    // Cluster version is greater than or equal to operator version.
    return clusterSemver.GreaterThanOrEqual(operatorSemver), nil
}
// GetPolicies returns the AWS STS policies known to OCM, keyed by policy ID.
// When policyType is non-empty the listing is filtered server-side to that
// policy_type; an empty policyType returns all policies.
func (c *Client) GetPolicies(policyType string) (map[string]*cmv1.AWSSTSPolicy, error) {
    query := fmt.Sprintf("policy_type = '%s'", policyType)
    m := make(map[string]*cmv1.AWSSTSPolicy)
    stmt := c.ocm.ClustersMgmt().V1().AWSInquiries().STSPolicies().List()
    if policyType != "" {
        stmt = stmt.Search(query)
    }
    accountRolePoliciesResponse, err := stmt.Send()
    if err != nil {
        return m, handleErr(accountRolePoliciesResponse.Error(), err)
    }
    accountRolePoliciesResponse.Items().Each(func(awsPolicy *cmv1.AWSSTSPolicy) bool {
        m[awsPolicy.ID()] = awsPolicy
        return true
    })
    return m, nil
}
// GetAllCredRequests merges the credential requests for both the classic and
// hosted-control-plane (hypershift) topologies into one map.
// The actual values might differ from classic to hcp — prefer
// GetCredRequests(isHypershift bool) when the topology is known in advance,
// since an hcp entry overwrites a classic entry with the same name here.
func (c *Client) GetAllCredRequests() (map[string]*cmv1.STSOperator, error) {
    result := make(map[string]*cmv1.STSOperator)
    // Fetch classic first, then hcp, so hcp values win on key collisions,
    // matching the original merge order.
    for _, hypershift := range []bool{false, true} {
        credRequests, err := c.GetCredRequests(hypershift)
        if err != nil {
            return result, err
        }
        for name, operator := range credRequests {
            result[name] = operator
        }
    }
    return result, nil
}
// GetCredRequests returns the STS credential requests for the requested
// topology (classic or hypershift), keyed by credential-request name with
// the associated operator as the value.
func (c *Client) GetCredRequests(isHypershift bool) (map[string]*cmv1.STSOperator, error) {
    m := make(map[string]*cmv1.STSOperator)
    stsCredentialResponse, err := c.ocm.ClustersMgmt().
        V1().
        AWSInquiries().
        STSCredentialRequests().
        List().
        Parameter("is_hypershift", isHypershift).
        Send()
    if err != nil {
        return m, handleErr(stsCredentialResponse.Error(), err)
    }
    stsCredentialResponse.Items().Each(func(stsCredentialRequest *cmv1.STSCredentialRequest) bool {
        m[stsCredentialRequest.Name()] = stsCredentialRequest.Operator()
        return true
    })
    return m, nil
}
// FindMissingOperatorRolesForUpgrade returns the operators whose minimum
// version is satisfied by newMinorVersion but whose IAM role is not yet
// present on the cluster, keyed by credential-request name.
func (c *Client) FindMissingOperatorRolesForUpgrade(cluster *cmv1.Cluster,
    newMinorVersion string) (map[string]*cmv1.STSOperator, error) {
    missingRoles := make(map[string]*cmv1.STSOperator)
    credRequests, err := c.GetCredRequests(cluster.Hypershift().Enabled())
    if err != nil {
        return nil, errors.Errorf("Error getting operator credential request from OCM %s", err)
    }
    // The upgrade version is loop-invariant; parse it once instead of
    // re-parsing it for every credential request as the original did.
    clusterUpgradeVersion, err := semver.NewVersion(newMinorVersion)
    if err != nil {
        return nil, err
    }
    for credRequest, operator := range credRequests {
        if operator.MinVersion() == "" {
            // No minimum version means the operator is not version-gated.
            continue
        }
        operatorMinVersion, err := semver.NewVersion(operator.MinVersion())
        if err != nil {
            return nil, err
        }
        if clusterUpgradeVersion.GreaterThanOrEqual(operatorMinVersion) &&
            !isOperatorRoleAlreadyExist(cluster, operator) {
            missingRoles[credRequest] = operator
        }
    }
    return missingRoles, nil
}
// createCloudProviderDataBuilder assembles AWS cloud-provider data for OCM
// inquiries. With a roleARN it builds STS credentials (optionally carrying
// an external ID); otherwise it falls back to the static access keys
// obtained from awsClient.
func (c *Client) createCloudProviderDataBuilder(roleARN string, awsClient aws.Client,
    externalID string) (*cmv1.CloudProviderDataBuilder, error) {
    var awsBuilder *cmv1.AWSBuilder
    if roleARN != "" {
        stsBuilder := cmv1.NewSTS().RoleARN(roleARN)
        if externalID != "" {
            stsBuilder = stsBuilder.ExternalID(externalID)
        }
        awsBuilder = cmv1.NewAWS().STS(stsBuilder)
    } else {
        accessKeys, err := awsClient.GetAWSAccessKeys()
        if err != nil {
            return &cmv1.CloudProviderDataBuilder{}, err
        }
        awsBuilder = cmv1.NewAWS().AccessKeyID(accessKeys.AccessKeyID).SecretAccessKey(accessKeys.SecretAccessKey)
    }
    return cmv1.NewCloudProviderData().AWS(awsBuilder), nil
}
// isOperatorRoleAlreadyExist reports whether the cluster already has an STS
// operator IAM role matching the operator's namespace and name.
func isOperatorRoleAlreadyExist(cluster *cmv1.Cluster, operator *cmv1.STSOperator) bool {
    //FIXME: Check it does not exist on AWS itself too
    // the iam roles will only return up to the version of the cluster
    for _, existing := range cluster.AWS().STS().OperatorIAMRoles() {
        sameNamespace := existing.Namespace() == operator.Namespace()
        sameName := existing.Name() == operator.Name()
        if sameNamespace && sameName {
            return true
        }
    }
    return false
}
const (
BYOVPCSingleAZSubnetsCount = 2
BYOVPCMultiAZSubnetsCount = 6
privateLinkSingleAZSubnetsCount = 1
privateLinkMultiAZSubnetsCount = 3
)
// ValidateSubnetsCount checks that the number of provided subnets matches
// the expected count for the cluster topology: private-link clusters need
// one subnet per AZ, non-private-link clusters need two per AZ.
func ValidateSubnetsCount(multiAZ bool, privateLink bool, subnetsInputCount int) error {
    switch {
    case privateLink && multiAZ && subnetsInputCount != privateLinkMultiAZSubnetsCount:
        return fmt.Errorf("The number of subnets for a multi-AZ private link cluster should be %d, "+
            "instead received: %d", privateLinkMultiAZSubnetsCount, subnetsInputCount)
    case privateLink && !multiAZ && subnetsInputCount != privateLinkSingleAZSubnetsCount:
        return fmt.Errorf("The number of subnets for a single AZ private link cluster should be %d, "+
            "instead received: %d", privateLinkSingleAZSubnetsCount, subnetsInputCount)
    case !privateLink && multiAZ && subnetsInputCount != BYOVPCMultiAZSubnetsCount:
        return fmt.Errorf("The number of subnets for a multi-AZ cluster should be %d, "+
            "instead received: %d", BYOVPCMultiAZSubnetsCount, subnetsInputCount)
    case !privateLink && !multiAZ && subnetsInputCount != BYOVPCSingleAZSubnetsCount:
        return fmt.Errorf("The number of subnets for a single AZ cluster should be %d, "+
            "instead received: %d", BYOVPCSingleAZSubnetsCount, subnetsInputCount)
    }
    return nil
}
// ValidateHostedClusterSubnets validates the subnet selection for a hosted
// (hypershift) cluster: private clusters need at least one subnet and no
// public subnets; public clusters need at least two subnets including at
// least one public subnet. Returns the number of private subnets found.
func ValidateHostedClusterSubnets(awsClient aws.Client, isPrivate bool, subnetIDs []string) (int, error) {
    if isPrivate && len(subnetIDs) < 1 {
        return 0, fmt.Errorf("The number of subnets for a private hosted cluster should be at least one")
    }
    if !isPrivate && len(subnetIDs) < 2 {
        return 0, fmt.Errorf("The number of subnets for a public hosted cluster should be at least two")
    }
    // All subnets are assumed to live in the same VPC as the first one;
    // fetch that VPC's subnets and keep only those the caller selected.
    vpcSubnets, vpcSubnetsErr := awsClient.GetVPCSubnets(subnetIDs[0])
    if vpcSubnetsErr != nil {
        return 0, vpcSubnetsErr
    }
    var subnets []*ec2.Subnet
    for _, subnet := range vpcSubnets {
        for _, subnetId := range subnetIDs {
            if awssdk.StringValue(subnet.SubnetId) == subnetId {
                subnets = append(subnets, subnet)
                break
            }
        }
    }
    privateSubnets, privateSubnetsErr := awsClient.FilterVPCsPrivateSubnets(subnets)
    if privateSubnetsErr != nil {
        return 0, privateSubnetsErr
    }
    privateSubnetCount := len(privateSubnets)
    // Whatever is not private is counted as public.
    publicSubnetsCount := len(subnets) - privateSubnetCount
    if isPrivate {
        if publicSubnetsCount > 0 {
            return 0, fmt.Errorf("The number of public subnets for a private hosted cluster should be zero")
        }
    } else {
        if publicSubnetsCount == 0 {
            return 0, fmt.Errorf("The number of public subnets for a public hosted " +
                "cluster should be at least one")
        }
    }
    return privateSubnetCount, nil
}
const (
singleAZCount = 1
MultiAZCount = 3
)
func ValidateAvailabilityZonesCount(multiAZ bool, availabilityZonesCount int) error {
if multiAZ && availabilityZonesCount != MultiAZCount {
return fmt.Errorf("The number of availability zones for a multi AZ cluster should be %d, "+
"instead received: %d", MultiAZCount, availabilityZonesCount)
}
if !multiAZ && availabilityZonesCount != singleAZCount {
return fmt.Errorf("The number of availability zones for a single AZ cluster should be %d, "+
"instead received: %d", singleAZCount, availabilityZonesCount)
}
return nil
}
// CheckUpgradeClusterVersion verifies that clusterUpgradeVersion passes
// IsValidVersion against at least one entry of availableUpgrades, relative
// to the cluster's current version (OpenshiftVersion, falling back to
// Version().RawID() when unset). Returns an error listing the valid
// versions otherwise.
func (c *Client) CheckUpgradeClusterVersion(
    availableUpgrades []string,
    clusterUpgradeVersion string,
    cluster *cmv1.Cluster,
) (err error) {
    clusterVersion := cluster.OpenshiftVersion()
    if clusterVersion == "" {
        clusterVersion = cluster.Version().RawID()
    }
    validVersion := false
    for _, v := range availableUpgrades {
        isValidVersion, err := IsValidVersion(clusterUpgradeVersion, v, clusterVersion)
        if err != nil {
            return err
        }
        if isValidVersion {
            validVersion = true
            break
        }
    }
    if !validVersion {
        return errors.Errorf(
            "Expected a valid version to upgrade cluster to.\nValid versions: %s",
            helper.SliceToSortedString(availableUpgrades),
        )
    }
    return nil
}
// GetPolicyVersion resolves the policy version to use for the given channel
// group. An empty userRequestedVersion selects the first version from the
// list; otherwise the requested version must appear in the list, or an
// error enumerating the valid versions is returned.
func (c *Client) GetPolicyVersion(userRequestedVersion string, channelGroup string) (string, error) {
    versionList, err := c.GetVersionsList(channelGroup)
    if err != nil {
        return userRequestedVersion, fmt.Errorf("%v", err)
    }
    if userRequestedVersion == "" {
        return versionList[0], nil
    }
    for _, vs := range versionList {
        if vs == userRequestedVersion {
            // Requested version is available; use it as-is.
            return userRequestedVersion, nil
        }
    }
    versionSet := helper.SliceToMap(versionList)
    return userRequestedVersion, errors.Errorf(
        "A valid policy version number must be specified\nValid versions: %v",
        helper.MapKeysToString(versionSet),
    )
}
// ParseVersion parses version as a semantic version and returns its
// "major.minor" string; parse failures are returned to the caller.
func ParseVersion(version string) (string, error) {
    parsed, err := semver.NewVersion(version)
    if err != nil {
        return "", err
    }
    segments := parsed.Segments64()
    return fmt.Sprintf("%d.%d", segments[0], segments[1]), nil
}
// GetVersionsList returns the "major.minor" strings of the versions in the
// given channel group that have STS support. Errors when the listing fails,
// a version cannot be parsed, or no usable versions remain.
func (c *Client) GetVersionsList(channelGroup string) ([]string, error) {
    response, err := c.GetVersions(channelGroup)
    if err != nil {
        err := fmt.Errorf("error getting versions: %s", err)
        return make([]string, 0), err
    }
    versionList := make([]string, 0)
    for _, v := range response {
        // Versions without STS support are skipped entirely.
        if !HasSTSSupport(v.RawID(), v.ChannelGroup()) {
            continue
        }
        parsedVersion, err := ParseVersion(v.RawID())
        if err != nil {
            err = fmt.Errorf("error parsing version")
            return versionList, err
        }
        versionList = append(versionList, parsedVersion)
    }
    if len(versionList) == 0 {
        err = fmt.Errorf("could not find versions for the provided channel-group: '%s'", channelGroup)
        return versionList, err
    }
    return versionList, nil
}
// ValidateOperatorRolesMatchOidcProvider verifies that each operator role in
// operatorIAMRoleList exists in AWS, sits on the expected role path, has a
// trust relationship to the given OIDC issuer URL, and (when not using
// managed policies) has attached policies compatible with clusterVersion.
// Progress is reported to the terminal unless output redirection is active.
func ValidateOperatorRolesMatchOidcProvider(reporter *reporter.Object, awsClient aws.Client,
    operatorIAMRoleList []OperatorIAMRole, oidcEndpointUrl string,
    clusterVersion string, expectedOperatorRolePath string) error {
    operatorIAMRoles := operatorIAMRoleList
    parsedUrl, err := url.Parse(oidcEndpointUrl)
    if err != nil {
        return err
    }
    if reporter.IsTerminal() && !output.HasFlag() {
        reporter.Infof("Reusable OIDC Configuration detected. Validating trusted relationships to operator roles: ")
    }
    for _, operatorIAMRole := range operatorIAMRoles {
        roleObject, err := awsClient.GetRoleByARN(operatorIAMRole.RoleARN)
        if err != nil {
            return err
        }
        roleARN := *roleObject.Arn
        pathFromArn, err := aws.GetPathFromARN(roleARN)
        if err != nil {
            return err
        }
        // The role path must match the path derived from the installer role.
        if pathFromArn != expectedOperatorRolePath {
            return errors.Errorf("Operator Role '%s' does not match the path from Installer Role, "+
                "please choose correct Installer Role and try again.", roleARN)
        }
        // The locally computed ARN must agree with what AWS returned.
        if roleARN != operatorIAMRole.RoleARN {
            return errors.Errorf("Computed Operator Role '%s' does not match role ARN found in AWS '%s', "+
                "please check if the correct parameters have been supplied.", operatorIAMRole.RoleARN, roleARN)
        }
        err = validateIssuerUrlMatchesAssumePolicyDocument(
            roleARN, parsedUrl, *roleObject.AssumeRolePolicyDocument)
        if err != nil {
            return err
        }
        hasManagedPolicies, err := awsClient.HasManagedPolicies(roleARN)
        if err != nil {
            return err
        }
        if hasManagedPolicies {
            // Managed policies should be compatible with all versions
            continue
        }
        policiesDetails, err := awsClient.GetAttachedPolicy(roleObject.RoleName)
        if err != nil {
            return err
        }
        for _, policyDetails := range policiesDetails {
            // Inline policies are not version-checked, only attached ones.
            if policyDetails.PolicType == aws.Inline {
                continue
            }
            isCompatible, err := awsClient.IsPolicyCompatible(policyDetails.PolicyArn, clusterVersion)
            if err != nil {
                return err
            }
            if !isCompatible {
                return errors.Errorf("Operator role '%s' is not compatible with cluster version '%s'", roleARN, clusterVersion)
            }
        }
        if reporter.IsTerminal() && !output.HasFlag() {
            reporter.Infof("Using '%s'", *roleObject.Arn)
        }
    }
    return nil
}
func validateIssuerUrlMatchesAssumePolicyDocument(
roleArn string, parsedUrl *url.URL, assumePolicyDocument string) error {
issuerUrl := parsedUrl.Host
if parsedUrl.Path != "" {
issuerUrl += parsedUrl.Path
}
decodedAssumePolicyDocument, err := url.QueryUnescape(assumePolicyDocument)
if err != nil {
return err
}
if !strings.Contains(decodedAssumePolicyDocument, issuerUrl) {
return errors.Errorf("Operator role '%s' does not have trusted relationship to '%s' issuer URL",
roleArn, issuerUrl)
}
return nil
} | externalID = acctResponse.Organization().ExternalID()
return
}
| random_line_split |
helpers.go | /**
Copyright (c) 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ocm
import (
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"os"
"regexp"
"strings"
awssdk "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/ec2"
semver "github.com/hashicorp/go-version"
"github.com/openshift/rosa/pkg/aws"
"github.com/openshift/rosa/pkg/output"
"github.com/openshift/rosa/pkg/reporter"
errors "github.com/zgalor/weberr"
amsv1 "github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1"
cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
ocmerrors "github.com/openshift-online/ocm-sdk-go/errors"
"github.com/openshift/rosa/pkg/helper"
)
const (
ANY = "any"
HibernateCapability = "capability.organization.hibernate_cluster"
//Pendo Events
Success = "Success"
Failure = "Failure"
Response = "Response"
ClusterID = "ClusterID"
OperatorRolesPrefix = "OperatorRolePrefix"
Version = "Version"
Username = "Username"
URL = "URL"
IsThrottle = "IsThrottle"
OCMRoleLabel = "sts_ocm_role"
USERRoleLabel = "sts_user_role"
maxClusterNameLength = 15
)
// Regular expression used to make sure that the identifier or name given by the user is
// safe and that there is no risk of SQL injection:
var clusterKeyRE = regexp.MustCompile(`^(\w|-)+$`)
// Cluster names must be valid DNS-1035 labels, so they must consist of lower case alphanumeric
// characters or '-', start with an alphabetic character, and end with an alphanumeric character
var clusterNameRE = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,13}[a-z0-9])?$`)
var badUsernameRE = regexp.MustCompile(`^(~|\.?\.|.*[:\/%].*)$`)
// IsValidClusterKey reports whether clusterKey matches the safe-identifier
// pattern (word characters and '-' only), guarding against SQL injection.
func IsValidClusterKey(clusterKey string) bool {
    return clusterKeyRE.MatchString(clusterKey)
}
// IsValidClusterName reports whether clusterName is a valid DNS-1035 label
// of at most 15 characters (per clusterNameRE).
func IsValidClusterName(clusterName string) bool {
    return clusterNameRE.MatchString(clusterName)
}
// ClusterNameValidator validates that name is a string containing a valid
// cluster name after trimming spaces/tabs; non-string inputs are rejected.
func ClusterNameValidator(name interface{}) error {
    if str, ok := name.(string); ok {
        str := strings.Trim(str, " \t")
        if !IsValidClusterName(str) {
            return fmt.Errorf("Cluster name must consist of no more than 15 lowercase " +
                "alphanumeric characters or '-', start with a letter, and end with an " +
                "alphanumeric character.")
        }
        return nil
    }
    return fmt.Errorf("can only validate strings, got %v", name)
}
// ValidateHTTPProxy checks that val is a string holding either an empty
// value (allowed) or a parseable URI whose scheme is exactly "http".
func ValidateHTTPProxy(val interface{}) error {
    if httpProxy, ok := val.(string); ok {
        if httpProxy == "" {
            return nil
        }
        url, err := url.ParseRequestURI(httpProxy)
        if err != nil {
            return fmt.Errorf("Invalid http-proxy value '%s'", httpProxy)
        }
        if url.Scheme != "http" {
            return errors.Errorf("%s", "Expected http-proxy to have an http:// scheme")
        }
        return nil
    }
    return fmt.Errorf("can only validate strings, got %v", val)
}
// ValidateAdditionalTrustBundle validates that the given value names a
// readable, non-empty file containing at least one PEM-encoded certificate.
// An empty file name is accepted (no additional trust bundle).
func ValidateAdditionalTrustBundle(val interface{}) error {
    additionalTrustBundleFile, ok := val.(string)
    if !ok {
        return fmt.Errorf("can only validate strings, got %v", val)
    }
    if additionalTrustBundleFile == "" {
        return nil
    }
    cert, err := os.ReadFile(additionalTrustBundleFile)
    if err != nil {
        return err
    }
    if len(cert) == 0 {
        return errors.Errorf("%s", "Trust bundle file is empty")
    }
    // Feed the raw bytes to the PEM parser directly; the previous
    // []byte -> string -> []byte round trip was a no-op.
    if !x509.NewCertPool().AppendCertsFromPEM(cert) {
        return errors.Errorf("%s", "Failed to parse additional trust bundle")
    }
    return nil
}
// IsValidUsername reports whether the given username is acceptable, i.e. does
// not match the reserved/unsafe patterns in badUsernameRE.
func IsValidUsername(username string) bool {
return !badUsernameRE.MatchString(username)
}
// IsEmptyCIDR reports whether the given CIDR is the zero value; net.IPNet
// renders its zero value as the literal string "<nil>".
func IsEmptyCIDR(cidr net.IPNet) bool {
return cidr.String() == "<nil>"
}
// Determine whether a resources is compatible with ROSA clusters in general,
// by checking its product ("rosa"/"moa"), cloud provider ("aws") and BYOC
// flag; the ANY wildcard matches any value.
func isCompatible(relatedResource *amsv1.RelatedResource) bool {
product := strings.ToLower(relatedResource.Product())
cloudProvider := strings.ToLower(relatedResource.CloudProvider())
byoc := strings.ToLower(relatedResource.BYOC())
// nolint:goconst
return (product == ANY || product == "rosa" || product == "moa") &&
(cloudProvider == ANY || cloudProvider == "aws") &&
(byoc == ANY || byoc == "byoc")
}
// handleErr converts an OCM API error response into a typed error, preferring
// the server-provided reason over the transport error's message.
// NOTE(review): assumes res is non-nil even on transport failures — confirm
// against the OCM SDK before calling with a possibly-nil response error.
func handleErr(res *ocmerrors.Error, err error) error {
msg := res.Reason()
if msg == "" {
msg = err.Error()
}
// Hack to always display the correct terms and conditions message
if res.Code() == "CLUSTERS-MGMT-451" {
msg = "You must accept the Terms and Conditions in order to continue.\n" +
"Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" +
"Once you accept the terms, you will need to retry the action that was blocked."
}
// Map the HTTP status code onto the corresponding weberr error type.
errType := errors.ErrorType(res.Status())
return errType.Set(errors.Errorf("%s", msg))
}
// GetDefaultClusterFlavors fetches the named flavour from OCM (falling back
// to the "osd-4" flavour when the lookup fails) and returns its default
// network CIDRs, host prefix and compute instance type. Any CIDR that fails
// to parse is returned as nil.
func (c *Client) GetDefaultClusterFlavors(flavour string) (dMachinecidr *net.IPNet, dPodcidr *net.IPNet,
    dServicecidr *net.IPNet, dhostPrefix int, computeInstanceType string) {
    flavourGetResponse, err := c.ocm.ClustersMgmt().V1().Flavours().Flavour(flavour).Get().Send()
    if err != nil {
        flavourGetResponse, _ = c.ocm.ClustersMgmt().V1().Flavours().Flavour("osd-4").Get().Send()
    }
    // If both lookups failed at the transport level there may be no response
    // at all; return zero values instead of dereferencing nil below.
    if flavourGetResponse == nil {
        return nil, nil, nil, 0, ""
    }
    aws, ok := flavourGetResponse.Body().GetAWS()
    if !ok {
        return nil, nil, nil, 0, ""
    }
    computeInstanceType = aws.ComputeInstanceType()
    network, ok := flavourGetResponse.Body().GetNetwork()
    if !ok {
        return nil, nil, nil, 0, computeInstanceType
    }
    _, dMachinecidr, err = net.ParseCIDR(network.MachineCIDR())
    if err != nil {
        dMachinecidr = nil
    }
    _, dPodcidr, err = net.ParseCIDR(network.PodCIDR())
    if err != nil {
        dPodcidr = nil
    }
    _, dServicecidr, err = net.ParseCIDR(network.ServiceCIDR())
    if err != nil {
        dServicecidr = nil
    }
    dhostPrefix, _ = network.GetHostPrefix()
    return dMachinecidr, dPodcidr, dServicecidr, dhostPrefix, computeInstanceType
}
// LogEvent sends a tracking event with the given key and body to OCM.
// Errors are deliberately ignored: event logging is best-effort and must not
// interfere with the user's action.
func (c *Client) LogEvent(key string, body map[string]string) {
event, err := cmv1.NewEvent().Key(key).Body(body).Build()
if err == nil {
_, _ = c.ocm.ClustersMgmt().V1().
Events().
Add().
Body(event).
Send()
}
}
// GetCurrentAccount returns the account of the authenticated user, or
// (nil, nil) when the account does not exist (HTTP 404).
func (c *Client) GetCurrentAccount() (*amsv1.Account, error) {
response, err := c.ocm.AccountsMgmt().V1().
CurrentAccount().
Get().
Send()
if err != nil {
// A missing account is not treated as an error by callers.
if response.Status() == http.StatusNotFound {
return nil, nil
}
return nil, handleErr(response.Error(), err)
}
return response.Body(), nil
}
// GetCurrentOrganization returns the internal and external IDs of the current
// user's organization.
// NOTE(review): GetCurrentAccount returns (nil, nil) on 404; the generated
// SDK getters appear to tolerate nil receivers, but confirm before relying on
// the returned IDs being non-empty.
func (c *Client) GetCurrentOrganization() (id string, externalID string, err error) {
acctResponse, err := c.GetCurrentAccount()
if err != nil {
return
}
id = acctResponse.Organization().ID()
externalID = acctResponse.Organization().ExternalID()
return
}
// IsCapabilityEnabled reports whether the given capability is enabled for the
// current user's organization.
func (c *Client) IsCapabilityEnabled(capability string) (enabled bool, err error) {
    organizationID, _, err := c.GetCurrentOrganization()
    if err != nil {
        return
    }
    // Return the lookup result directly; the previous if/else on the boolean
    // result was redundant.
    return c.isCapabilityEnabled(capability, organizationID)
}
// isCapabilityEnabled queries the organization (with capabilities fetched)
// and reports whether the named capability is present with the value "true".
func (c *Client) isCapabilityEnabled(capabilityName string, orgID string) (bool, error) {
    capabilityResponse, err := c.ocm.AccountsMgmt().V1().Organizations().
        Organization(orgID).Get().Parameter("fetchCapabilities", true).Send()
    if err != nil {
        return false, handleErr(capabilityResponse.Error(), err)
    }
    // Ranging over an empty slice is a no-op, so no length guard is needed.
    for _, capability := range capabilityResponse.Body().Capabilities() {
        if capability.Name() == capabilityName {
            return capability.Value() == "true", nil
        }
    }
    return false, nil
}
// UnlinkUserRoleFromAccount removes the given user-role ARN from the
// account's "sts_user_role" label. When the ARN is the last one linked the
// label is deleted entirely; otherwise the label is rewritten without it.
// Returns a user error when the ARN is not currently linked.
func (c *Client) UnlinkUserRoleFromAccount(accountID string, roleARN string) error {
linkedRoles, err := c.GetAccountLinkedUserRoles(accountID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
// Other roles remain: rewrite the label with the remaining ARNs.
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(USERRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
// That was the only linked role: drop the label altogether.
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role ARN '%s' is not linked with the current account '%s'", roleARN, accountID)
}
// LinkAccountRole links the given user-role ARN to the account by storing it
// in the account's "sts_user_role" label. An ARN that is already linked is
// left as-is; additional ARNs are appended comma-separated.
func (c *Client) LinkAccountRole(accountID string, roleARN string) error {
    // Use the shared label-name constant (and named HTTP status codes)
    // instead of literals, for consistency with the other label helpers.
    resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
        Labels().Labels(USERRoleLabel).Get().Send()
    if err != nil && resp.Status() != http.StatusNotFound {
        if resp.Status() == http.StatusForbidden {
            return errors.Forbidden.UserErrorf("%v", err)
        }
        return handleErr(resp.Error(), err)
    }
    existingARN := resp.Body().Value()
    if existingARN != "" {
        // Nothing to do when the ARN is already linked.
        for _, value := range strings.Split(existingARN, ",") {
            if value == roleARN {
                return nil
            }
        }
        roleARN = existingARN + "," + roleARN
    }
    labelBuilder, err := amsv1.NewLabel().Key(USERRoleLabel).Value(roleARN).Build()
    if err != nil {
        return err
    }
    _, err = c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
        Labels().Add().Body(labelBuilder).Send()
    if err != nil {
        return handleErr(resp.Error(), err)
    }
    // err is known to be nil here; return nil explicitly.
    return nil
}
// UnlinkOCMRoleFromOrg removes the given OCM-role ARN from the organization's
// "sts_ocm_role" label. When the ARN is the last one linked the label is
// deleted entirely; otherwise the label is rewritten without it. Returns a
// user error when the ARN is not currently linked.
func (c *Client) UnlinkOCMRoleFromOrg(orgID string, roleARN string) error {
linkedRoles, err := c.GetOrganizationLinkedOCMRoles(orgID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
// Other roles remain: rewrite the label with the remaining ARNs.
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
// That was the only linked role: drop the label altogether.
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role-arn '%s' is not linked with the organization account '%s'", roleARN, orgID)
}
// LinkOrgToRole links the given OCM-role ARN to the organization via the
// "sts_ocm_role" label, appending to any ARNs already linked for other AWS
// accounts. Returns (false, nil) when the exact ARN is already linked, and a
// user error when a different role is linked for the same AWS account.
func (c *Client) LinkOrgToRole(orgID string, roleARN string) (bool, error) {
parsedARN, err := arn.Parse(roleARN)
if err != nil {
return false, err
}
exists, existingARN, selectedARN, err := c.CheckIfAWSAccountExists(orgID, parsedARN.AccountID)
if err != nil {
return false, err
}
if exists {
// Only one role is allowed per AWS account per organization.
if selectedARN != roleARN {
return false, errors.UserErrorf("User organization '%s' has role-arn '%s' associated. "+
"Only one role can be linked per AWS account per organization", orgID, selectedARN)
}
return false, nil
}
if existingARN != "" {
// Roles for other AWS accounts exist: append rather than replace.
roleARN = existingARN + "," + roleARN
}
labelBuilder, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(roleARN).Build()
if err != nil {
return false, err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Add().Body(labelBuilder).Send()
if err != nil {
return false, handleErr(resp.Error(), err)
}
return true, nil
}
// GetAccountLinkedUserRoles returns the user-role ARNs linked to the account,
// stored comma-separated in its "sts_user_role" label. A missing label (404)
// is not an error.
// NOTE(review): when the label is absent, Value() is empty and the result is
// a single-element slice containing "" — confirm callers expect this.
func (c *Client) GetAccountLinkedUserRoles(accountID string) ([]string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Labels(USERRoleLabel).Get().Send()
if err != nil && resp.Status() != http.StatusNotFound {
return nil, handleErr(resp.Error(), err)
}
return strings.Split(resp.Body().Value(), ","), nil
}
// GetOrganizationLinkedOCMRoles returns the OCM-role ARNs linked to the
// organization, stored comma-separated in its "sts_ocm_role" label. A missing
// label (404) is not an error.
func (c *Client) GetOrganizationLinkedOCMRoles(orgID string) ([]string, error) {
    resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
        Labels().Labels(OCMRoleLabel).Get().Send()
    if err != nil && resp.Status() != http.StatusNotFound {
        // Wrap the OCM response like GetAccountLinkedUserRoles does, instead
        // of returning the raw transport error.
        return nil, handleErr(resp.Error(), err)
    }
    return strings.Split(resp.Body().Value(), ","), nil
}
// CheckIfAWSAccountExists checks whether an OCM role is already linked for
// the given AWS account in the organization's "sts_ocm_role" label.
// Returns (exists, allLinkedARNs, matchingARN, error).
func (c *Client) CheckIfAWSAccountExists(orgID string, awsAccountID string) (bool, string, string, error) {
    resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
        Labels().Labels(OCMRoleLabel).Get().Send()
    // A missing label (404) just means no role is linked yet; use the named
    // status constants for consistency with the rest of the file.
    if err != nil && resp.Status() != http.StatusNotFound {
        if resp.Status() == http.StatusForbidden {
            return false, "", "", errors.Forbidden.UserErrorf("%v", err)
        }
        return false, "", "", handleErr(resp.Error(), err)
    }
    existingARN := resp.Body().Value()
    exists := false
    selectedARN := ""
    if existingARN != "" {
        // Ranging over the split result needs no length guard.
        for _, value := range strings.Split(existingARN, ",") {
            parsedARN, err := arn.Parse(value)
            if err != nil {
                return false, "", "", err
            }
            if parsedARN.AccountID == awsAccountID {
                exists = true
                selectedARN = value
                break
            }
        }
    }
    return exists, existingARN, selectedARN, nil
}
/*
We should allow only one role per aws account per organization
If the user request same ocm role we should let them proceed to ensure they can add admin role
if not exists or attach policies or link etc
if the user request diff ocm role name we error out
*/
// CheckRoleExists returns (true, existingRoleName, existingARN) when a
// different OCM role is already linked for the AWS account, and
// (false, "", "") when nothing is linked or the linked role has the same name.
func (c *Client) CheckRoleExists(orgID string, roleName string, awsAccountID string) (bool, string, string, error) {
    exists, _, selectedARN, err := c.CheckIfAWSAccountExists(orgID, awsAccountID)
    if err != nil {
        return false, "", "", err
    }
    if !exists {
        return false, "", "", nil
    }
    // Role ARNs look like arn:aws:iam::<account>:role/<name>; the role name
    // is everything after the first '/'. Guard against an ARN without a '/'
    // so we do not index out of range.
    existingRole := strings.SplitN(selectedARN, "/", 2)
    if len(existingRole) < 2 {
        return true, "", selectedARN, nil
    }
    if existingRole[1] == roleName {
        return false, "", "", nil
    }
    return true, existingRole[1], selectedARN, nil
}
// GetVersionMinor returns the "major.minor" prefix of the given OpenShift
// version, accepting both raw versions and "openshift-v..." identifiers.
// Inputs that are not valid semantic versions fall back to string splitting.
func GetVersionMinor(ver string) string {
    rawID := strings.Replace(ver, "openshift-v", "", 1)
    version, err := semver.NewVersion(rawID)
    if err != nil {
        segments := strings.Split(rawID, ".")
        // Guard against versions without a minor component so we do not
        // index out of range.
        if len(segments) < 2 {
            return rawID
        }
        return fmt.Sprintf("%s.%s", segments[0], segments[1])
    }
    segments := version.Segments()
    return fmt.Sprintf("%d.%d", segments[0], segments[1])
}
// CheckSupportedVersion reports whether clusterVersion is greater than or
// equal to operatorVersion, i.e. whether the cluster already satisfies the
// operator's minimum version requirement. Errors when either string is not a
// valid semantic version.
func CheckSupportedVersion(clusterVersion string, operatorVersion string) (bool, error) {
v1, err := semver.NewVersion(clusterVersion)
if err != nil {
return false, err
}
v2, err := semver.NewVersion(operatorVersion)
if err != nil {
return false, err
}
//Cluster version is greater than or equal to operator version
return v1.GreaterThanOrEqual(v2), nil
}
// GetPolicies returns the AWS STS policies known to OCM, keyed by policy ID.
// When policyType is non-empty the listing is filtered to that policy type.
func (c *Client) GetPolicies(policyType string) (map[string]*cmv1.AWSSTSPolicy, error) {
    m := make(map[string]*cmv1.AWSSTSPolicy)
    stmt := c.ocm.ClustersMgmt().V1().AWSInquiries().STSPolicies().List()
    if policyType != "" {
        // Build the search query only when a filter was actually requested.
        stmt = stmt.Search(fmt.Sprintf("policy_type = '%s'", policyType))
    }
    accountRolePoliciesResponse, err := stmt.Send()
    if err != nil {
        return m, handleErr(accountRolePoliciesResponse.Error(), err)
    }
    accountRolePoliciesResponse.Items().Each(func(awsPolicy *cmv1.AWSSTSPolicy) bool {
        m[awsPolicy.ID()] = awsPolicy
        return true
    })
    return m, nil
}
// The actual values might differ from classic to hcp
// prefer using GetCredRequests(isHypershift bool) when there is prior knowledge of the topology
// GetAllCredRequests merges the classic and hosted-control-plane credential
// requests into one map; on a name collision the hcp entry wins because it is
// copied second.
func (c *Client) GetAllCredRequests() (map[string]*cmv1.STSOperator, error) {
result := make(map[string]*cmv1.STSOperator)
classic, err := c.GetCredRequests(false)
if err != nil {
return result, err
}
hcp, err := c.GetCredRequests(true)
if err != nil {
return result, err
}
for key, value := range classic {
result[key] = value
}
for key, value := range hcp {
result[key] = value
}
return result, nil
}
// GetCredRequests returns the STS operator credential requests known to OCM,
// keyed by request name, for either hosted-control-plane (hypershift) or
// classic clusters depending on the flag.
func (c *Client) GetCredRequests(isHypershift bool) (map[string]*cmv1.STSOperator, error) {
m := make(map[string]*cmv1.STSOperator)
stsCredentialResponse, err := c.ocm.ClustersMgmt().
V1().
AWSInquiries().
STSCredentialRequests().
List().
Parameter("is_hypershift", isHypershift).
Send()
if err != nil {
return m, handleErr(stsCredentialResponse.Error(), err)
}
stsCredentialResponse.Items().Each(func(stsCredentialRequest *cmv1.STSCredentialRequest) bool {
m[stsCredentialRequest.Name()] = stsCredentialRequest.Operator()
return true
})
return m, nil
}
// FindMissingOperatorRolesForUpgrade returns the operator credential requests
// whose roles are required at newMinorVersion but do not yet exist on the
// cluster.
func (c *Client) FindMissingOperatorRolesForUpgrade(cluster *cmv1.Cluster,
    newMinorVersion string) (map[string]*cmv1.STSOperator, error) {
    missingRoles := make(map[string]*cmv1.STSOperator)
    credRequests, err := c.GetCredRequests(cluster.Hypershift().Enabled())
    if err != nil {
        return nil, errors.Errorf("Error getting operator credential request from OCM %s", err)
    }
    // Parse the target version once instead of re-parsing it for every
    // credential request inside the loop.
    clusterUpgradeVersion, err := semver.NewVersion(newMinorVersion)
    if err != nil {
        return nil, err
    }
    for credRequest, operator := range credRequests {
        // Operators without a minimum version are required at every version
        // and therefore never "missing because of the upgrade".
        if operator.MinVersion() == "" {
            continue
        }
        operatorMinVersion, err := semver.NewVersion(operator.MinVersion())
        if err != nil {
            return nil, err
        }
        if clusterUpgradeVersion.GreaterThanOrEqual(operatorMinVersion) &&
            !isOperatorRoleAlreadyExist(cluster, operator) {
            missingRoles[credRequest] = operator
        }
    }
    return missingRoles, nil
}
// createCloudProviderDataBuilder builds the AWS cloud-provider payload used
// for OCM inquiries: STS-based when a role ARN is given (optionally with an
// external ID), otherwise access-key based using the local AWS credentials.
func (c *Client) createCloudProviderDataBuilder(roleARN string, awsClient aws.Client,
externalID string) (*cmv1.CloudProviderDataBuilder, error) {
var awsBuilder *cmv1.AWSBuilder
if roleARN != "" {
stsBuilder := cmv1.NewSTS().RoleARN(roleARN)
if externalID != "" {
stsBuilder = stsBuilder.ExternalID(externalID)
}
awsBuilder = cmv1.NewAWS().STS(stsBuilder)
} else {
// No role ARN: fall back to static credentials from the AWS client.
accessKeys, err := awsClient.GetAWSAccessKeys()
if err != nil {
return &cmv1.CloudProviderDataBuilder{}, err
}
awsBuilder = cmv1.NewAWS().AccessKeyID(accessKeys.AccessKeyID).SecretAccessKey(accessKeys.SecretAccessKey)
}
return cmv1.NewCloudProviderData().AWS(awsBuilder), nil
}
// isOperatorRoleAlreadyExist reports whether the cluster already declares an
// operator IAM role with the same namespace and name as the given operator.
func isOperatorRoleAlreadyExist(cluster *cmv1.Cluster, operator *cmv1.STSOperator) bool {
for _, role := range cluster.AWS().STS().OperatorIAMRoles() {
//FIXME: Check it does not exist on AWS itself too
// the iam roles will only return up to the version of the cluster
if role.Namespace() == operator.Namespace() && role.Name() == operator.Name() {
return true
}
}
return false
}
// Expected subnet counts per topology: BYO-VPC clusters need a public and a
// private subnet per AZ; private-link clusters need one private subnet per AZ.
const (
    BYOVPCSingleAZSubnetsCount      = 2
    BYOVPCMultiAZSubnetsCount       = 6
    privateLinkSingleAZSubnetsCount = 1
    privateLinkMultiAZSubnetsCount  = 3
)

// ValidateSubnetsCount validates that the number of supplied subnets matches
// the expected count for the requested topology (multi/single AZ, with or
// without private link).
func ValidateSubnetsCount(multiAZ bool, privateLink bool, subnetsInputCount int) error {
    var expected int
    var clusterKind string
    switch {
    case privateLink && multiAZ:
        expected, clusterKind = privateLinkMultiAZSubnetsCount, "a multi-AZ private link cluster"
    case privateLink:
        expected, clusterKind = privateLinkSingleAZSubnetsCount, "a single AZ private link cluster"
    case multiAZ:
        expected, clusterKind = BYOVPCMultiAZSubnetsCount, "a multi-AZ cluster"
    default:
        expected, clusterKind = BYOVPCSingleAZSubnetsCount, "a single AZ cluster"
    }
    if subnetsInputCount != expected {
        return fmt.Errorf("The number of subnets for %s should be %d, instead received: %d",
            clusterKind, expected, subnetsInputCount)
    }
    return nil
}
// ValidateHostedClusterSubnets validates the subnets chosen for a hosted
// (hypershift) cluster: private clusters need at least one subnet and no
// public subnets; public clusters need at least two subnets of which at least
// one is public. Returns the number of private subnets on success.
// NOTE(review): all subnets are looked up via the VPC of the first subnet ID,
// so IDs from a different VPC are silently dropped — confirm this is intended.
func ValidateHostedClusterSubnets(awsClient aws.Client, isPrivate bool, subnetIDs []string) (int, error) {
if isPrivate && len(subnetIDs) < 1 {
return 0, fmt.Errorf("The number of subnets for a private hosted cluster should be at least one")
}
if !isPrivate && len(subnetIDs) < 2 {
return 0, fmt.Errorf("The number of subnets for a public hosted cluster should be at least two")
}
vpcSubnets, vpcSubnetsErr := awsClient.GetVPCSubnets(subnetIDs[0])
if vpcSubnetsErr != nil {
return 0, vpcSubnetsErr
}
// Keep only the VPC subnets the user actually selected.
var subnets []*ec2.Subnet
for _, subnet := range vpcSubnets {
for _, subnetId := range subnetIDs {
if awssdk.StringValue(subnet.SubnetId) == subnetId {
subnets = append(subnets, subnet)
break
}
}
}
privateSubnets, privateSubnetsErr := awsClient.FilterVPCsPrivateSubnets(subnets)
if privateSubnetsErr != nil {
return 0, privateSubnetsErr
}
privateSubnetCount := len(privateSubnets)
publicSubnetsCount := len(subnets) - privateSubnetCount
if isPrivate {
if publicSubnetsCount > 0 {
return 0, fmt.Errorf("The number of public subnets for a private hosted cluster should be zero")
}
} else {
if publicSubnetsCount == 0 {
return 0, fmt.Errorf("The number of public subnets for a public hosted " +
"cluster should be at least one")
}
}
return privateSubnetCount, nil
}
// Expected availability-zone counts per cluster topology.
const (
    singleAZCount = 1
    MultiAZCount  = 3
)

// ValidateAvailabilityZonesCount validates that the number of availability
// zones matches the cluster topology: 3 for a multi-AZ cluster, 1 for a
// single-AZ cluster.
func ValidateAvailabilityZonesCount(multiAZ bool, availabilityZonesCount int) error {
    if multiAZ && availabilityZonesCount != MultiAZCount {
        return fmt.Errorf("The number of availability zones for a multi AZ cluster should be %d, "+
            "instead received: %d", MultiAZCount, availabilityZonesCount)
    }
    if !multiAZ && availabilityZonesCount != singleAZCount {
        return fmt.Errorf("The number of availability zones for a single AZ cluster should be %d, "+
            "instead received: %d", singleAZCount, availabilityZonesCount)
    }
    return nil
}
// CheckUpgradeClusterVersion validates that clusterUpgradeVersion is one of
// the cluster's available upgrade versions, using the cluster's current
// OpenShift version (or raw version ID as a fallback) as the baseline.
func (c *Client) CheckUpgradeClusterVersion(
availableUpgrades []string,
clusterUpgradeVersion string,
cluster *cmv1.Cluster,
) (err error) {
clusterVersion := cluster.OpenshiftVersion()
if clusterVersion == "" {
// Fall back to the raw version identifier when OpenshiftVersion is unset.
clusterVersion = cluster.Version().RawID()
}
validVersion := false
for _, v := range availableUpgrades {
isValidVersion, err := IsValidVersion(clusterUpgradeVersion, v, clusterVersion)
if err != nil {
return err
}
if isValidVersion {
validVersion = true
break
}
}
if !validVersion {
return errors.Errorf(
"Expected a valid version to upgrade cluster to.\nValid versions: %s",
helper.SliceToSortedString(availableUpgrades),
)
}
return nil
}
// GetPolicyVersion returns the policy version to use: the user-requested
// version when it is available in the channel group, or the latest available
// version when no version was requested.
func (c *Client) GetPolicyVersion(userRequestedVersion string, channelGroup string) (string, error) {
    versionList, err := c.GetVersionsList(channelGroup)
    if err != nil {
        // Propagate the error as-is; re-wrapping it through
        // fmt.Errorf("%v", err) only discarded its type.
        return userRequestedVersion, err
    }
    if userRequestedVersion == "" {
        // GetVersionsList errors out on an empty list, so indexing is safe.
        return versionList[0], nil
    }
    for _, vs := range versionList {
        if vs == userRequestedVersion {
            return userRequestedVersion, nil
        }
    }
    versionSet := helper.SliceToMap(versionList)
    return userRequestedVersion, errors.Errorf(
        "A valid policy version number must be specified\nValid versions: %v",
        helper.MapKeysToString(versionSet),
    )
}
// ParseVersion reduces a semantic version string to its "major.minor" form.
// Errors when the input is not a valid semantic version.
func ParseVersion(version string) (string, error) {
parsedVersion, err := semver.NewVersion(version)
if err != nil {
return "", err
}
versionSplit := parsedVersion.Segments64()
return fmt.Sprintf("%d.%d", versionSplit[0], versionSplit[1]), nil
}
// GetVersionsList returns the "major.minor" versions available in the given
// channel group, restricted to versions with STS support. Errors when the
// filtered list ends up empty.
// NOTE(review): the result may contain duplicate major.minor entries when
// several patch releases exist — confirm callers tolerate this.
func (c *Client) GetVersionsList(channelGroup string) ([]string, error) {
response, err := c.GetVersions(channelGroup)
if err != nil {
err := fmt.Errorf("error getting versions: %s", err)
return make([]string, 0), err
}
versionList := make([]string, 0)
for _, v := range response {
// Skip versions that do not support STS.
if !HasSTSSupport(v.RawID(), v.ChannelGroup()) {
continue
}
parsedVersion, err := ParseVersion(v.RawID())
if err != nil {
err = fmt.Errorf("error parsing version")
return versionList, err
}
versionList = append(versionList, parsedVersion)
}
if len(versionList) == 0 {
err = fmt.Errorf("could not find versions for the provided channel-group: '%s'", channelGroup)
return versionList, err
}
return versionList, nil
}
// ValidateOperatorRolesMatchOidcProvider validates, for every operator IAM
// role, that: the role exists in AWS, its path matches the installer role's
// expected path, its ARN matches the computed ARN, its trust policy
// references the OIDC issuer URL, and its attached (non-inline) policies are
// compatible with the cluster version. Roles with AWS-managed policies skip
// the per-policy compatibility check.
func ValidateOperatorRolesMatchOidcProvider(reporter *reporter.Object, awsClient aws.Client,
operatorIAMRoleList []OperatorIAMRole, oidcEndpointUrl string,
clusterVersion string, expectedOperatorRolePath string) error {
operatorIAMRoles := operatorIAMRoleList
parsedUrl, err := url.Parse(oidcEndpointUrl)
if err != nil {
return err
}
if reporter.IsTerminal() && !output.HasFlag() {
reporter.Infof("Reusable OIDC Configuration detected. Validating trusted relationships to operator roles: ")
}
for _, operatorIAMRole := range operatorIAMRoles {
roleObject, err := awsClient.GetRoleByARN(operatorIAMRole.RoleARN)
if err != nil {
return err
}
roleARN := *roleObject.Arn
pathFromArn, err := aws.GetPathFromARN(roleARN)
if err != nil {
return err
}
// The operator role must live under the same IAM path as the installer role.
if pathFromArn != expectedOperatorRolePath {
return errors.Errorf("Operator Role '%s' does not match the path from Installer Role, "+
"please choose correct Installer Role and try again.", roleARN)
}
if roleARN != operatorIAMRole.RoleARN {
return errors.Errorf("Computed Operator Role '%s' does not match role ARN found in AWS '%s', "+
"please check if the correct parameters have been supplied.", operatorIAMRole.RoleARN, roleARN)
}
// The role's trust policy must reference the cluster's OIDC issuer.
err = validateIssuerUrlMatchesAssumePolicyDocument(
roleARN, parsedUrl, *roleObject.AssumeRolePolicyDocument)
if err != nil {
return err
}
hasManagedPolicies, err := awsClient.HasManagedPolicies(roleARN)
if err != nil {
return err
}
if hasManagedPolicies {
// Managed policies should be compatible with all versions
continue
}
policiesDetails, err := awsClient.GetAttachedPolicy(roleObject.RoleName)
if err != nil {
return err
}
for _, policyDetails := range policiesDetails {
// Inline policies are not version-checked, only attached ones.
if policyDetails.PolicType == aws.Inline {
continue
}
isCompatible, err := awsClient.IsPolicyCompatible(policyDetails.PolicyArn, clusterVersion)
if err != nil {
return err
}
if !isCompatible {
return errors.Errorf("Operator role '%s' is not compatible with cluster version '%s'", roleARN, clusterVersion)
}
}
if reporter.IsTerminal() && !output.HasFlag() {
reporter.Infof("Using '%s'", *roleObject.Arn)
}
}
return nil
}
// validateIssuerUrlMatchesAssumePolicyDocument checks that the operator
// role's assume-role policy document references the OIDC issuer URL (host
// plus optional path) of the cluster.
func validateIssuerUrlMatchesAssumePolicyDocument(
    roleArn string, parsedUrl *url.URL, assumePolicyDocument string) error {
    // Host + Path is identical to the old conditional append: Path is ""
    // when absent, which contributes nothing to the concatenation.
    issuerUrl := parsedUrl.Host + parsedUrl.Path
    decodedAssumePolicyDocument, err := url.QueryUnescape(assumePolicyDocument)
    if err != nil {
        return err
    }
    if strings.Contains(decodedAssumePolicyDocument, issuerUrl) {
        return nil
    }
    return errors.Errorf("Operator role '%s' does not have trusted relationship to '%s' issuer URL",
        roleArn, issuerUrl)
}
| {
return fmt.Errorf("The number of availability zones for a single AZ cluster should be %d, "+
"instead received: %d", singleAZCount, availabilityZonesCount)
} | conditional_block |
helpers.go | /**
Copyright (c) 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ocm
import (
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"os"
"regexp"
"strings"
awssdk "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/ec2"
semver "github.com/hashicorp/go-version"
"github.com/openshift/rosa/pkg/aws"
"github.com/openshift/rosa/pkg/output"
"github.com/openshift/rosa/pkg/reporter"
errors "github.com/zgalor/weberr"
amsv1 "github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1"
cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
ocmerrors "github.com/openshift-online/ocm-sdk-go/errors"
"github.com/openshift/rosa/pkg/helper"
)
const (
ANY = "any"
HibernateCapability = "capability.organization.hibernate_cluster"
//Pendo Events
Success = "Success"
Failure = "Failure"
Response = "Response"
ClusterID = "ClusterID"
OperatorRolesPrefix = "OperatorRolePrefix"
Version = "Version"
Username = "Username"
URL = "URL"
IsThrottle = "IsThrottle"
OCMRoleLabel = "sts_ocm_role"
USERRoleLabel = "sts_user_role"
maxClusterNameLength = 15
)
// Regular expression to used to make sure that the identifier or name given by the user is
// safe and that it there is no risk of SQL injection:
var clusterKeyRE = regexp.MustCompile(`^(\w|-)+$`)
// Cluster names must be valid DNS-1035 labels, so they must consist of lower case alphanumeric
// characters or '-', start with an alphabetic character, and end with an alphanumeric character
var clusterNameRE = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,13}[a-z0-9])?$`)
var badUsernameRE = regexp.MustCompile(`^(~|\.?\.|.*[:\/%].*)$`)
func IsValidClusterKey(clusterKey string) bool {
return clusterKeyRE.MatchString(clusterKey)
}
func IsValidClusterName(clusterName string) bool {
return clusterNameRE.MatchString(clusterName)
}
// ClusterNameValidator validates that the given value is a string holding a
// valid cluster name: lowercase alphanumerics or '-', starting with a letter,
// ending with an alphanumeric, at most maxClusterNameLength characters.
// Returns nil when valid, a descriptive error otherwise.
func ClusterNameValidator(name interface{}) error {
    str, ok := name.(string)
    if !ok {
        return fmt.Errorf("can only validate strings, got %v", name)
    }
    str = strings.Trim(str, " \t")
    if !IsValidClusterName(str) {
        // Use the shared constant so this message cannot drift out of sync
        // with maxClusterNameLength and the regular expression.
        return fmt.Errorf("Cluster name must consist of no more than %d lowercase "+
            "alphanumeric characters or '-', start with a letter, and end with an "+
            "alphanumeric character.", maxClusterNameLength)
    }
    return nil
}
func ValidateHTTPProxy(val interface{}) error {
if httpProxy, ok := val.(string); ok {
if httpProxy == "" {
return nil
}
url, err := url.ParseRequestURI(httpProxy)
if err != nil {
return fmt.Errorf("Invalid http-proxy value '%s'", httpProxy)
}
if url.Scheme != "http" {
return errors.Errorf("%s", "Expected http-proxy to have an http:// scheme")
}
return nil
}
return fmt.Errorf("can only validate strings, got %v", val)
}
func ValidateAdditionalTrustBundle(val interface{}) error {
if additionalTrustBundleFile, ok := val.(string); ok {
if additionalTrustBundleFile == "" {
return nil
}
cert, err := os.ReadFile(additionalTrustBundleFile)
if err != nil {
return err
}
additionalTrustBundle := string(cert)
if additionalTrustBundle == "" {
return errors.Errorf("%s", "Trust bundle file is empty")
}
additionalTrustBundleBytes := []byte(additionalTrustBundle)
if !x509.NewCertPool().AppendCertsFromPEM(additionalTrustBundleBytes) {
return errors.Errorf("%s", "Failed to parse additional trust bundle")
}
return nil
}
return fmt.Errorf("can only validate strings, got %v", val)
}
func IsValidUsername(username string) bool {
return !badUsernameRE.MatchString(username)
}
func IsEmptyCIDR(cidr net.IPNet) bool {
return cidr.String() == "<nil>"
}
// Determine whether a resources is compatible with ROSA clusters in general
func isCompatible(relatedResource *amsv1.RelatedResource) bool {
product := strings.ToLower(relatedResource.Product())
cloudProvider := strings.ToLower(relatedResource.CloudProvider())
byoc := strings.ToLower(relatedResource.BYOC())
// nolint:goconst
return (product == ANY || product == "rosa" || product == "moa") &&
(cloudProvider == ANY || cloudProvider == "aws") &&
(byoc == ANY || byoc == "byoc")
}
func handleErr(res *ocmerrors.Error, err error) error {
msg := res.Reason()
if msg == "" {
msg = err.Error()
}
// Hack to always display the correct terms and conditions message
if res.Code() == "CLUSTERS-MGMT-451" {
msg = "You must accept the Terms and Conditions in order to continue.\n" +
"Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" +
"Once you accept the terms, you will need to retry the action that was blocked."
}
errType := errors.ErrorType(res.Status())
return errType.Set(errors.Errorf("%s", msg))
}
func (c *Client) GetDefaultClusterFlavors(flavour string) (dMachinecidr *net.IPNet, dPodcidr *net.IPNet,
dServicecidr *net.IPNet, dhostPrefix int, computeInstanceType string) {
flavourGetResponse, err := c.ocm.ClustersMgmt().V1().Flavours().Flavour(flavour).Get().Send()
if err != nil {
flavourGetResponse, _ = c.ocm.ClustersMgmt().V1().Flavours().Flavour("osd-4").Get().Send()
}
aws, ok := flavourGetResponse.Body().GetAWS()
if !ok {
return nil, nil, nil, 0, ""
}
computeInstanceType = aws.ComputeInstanceType()
network, ok := flavourGetResponse.Body().GetNetwork()
if !ok {
return nil, nil, nil, 0, computeInstanceType
}
_, dMachinecidr, err = net.ParseCIDR(network.MachineCIDR())
if err != nil {
dMachinecidr = nil
}
_, dPodcidr, err = net.ParseCIDR(network.PodCIDR())
if err != nil {
dPodcidr = nil
}
_, dServicecidr, err = net.ParseCIDR(network.ServiceCIDR())
if err != nil {
dServicecidr = nil
}
dhostPrefix, _ = network.GetHostPrefix()
return dMachinecidr, dPodcidr, dServicecidr, dhostPrefix, computeInstanceType
}
func (c *Client) LogEvent(key string, body map[string]string) |
func (c *Client) GetCurrentAccount() (*amsv1.Account, error) {
response, err := c.ocm.AccountsMgmt().V1().
CurrentAccount().
Get().
Send()
if err != nil {
if response.Status() == http.StatusNotFound {
return nil, nil
}
return nil, handleErr(response.Error(), err)
}
return response.Body(), nil
}
func (c *Client) GetCurrentOrganization() (id string, externalID string, err error) {
acctResponse, err := c.GetCurrentAccount()
if err != nil {
return
}
id = acctResponse.Organization().ID()
externalID = acctResponse.Organization().ExternalID()
return
}
// IsCapabilityEnabled reports whether the given capability is enabled for the
// current user's organization.
func (c *Client) IsCapabilityEnabled(capability string) (enabled bool, err error) {
    organizationID, _, err := c.GetCurrentOrganization()
    if err != nil {
        return
    }
    // Return the lookup result directly; the previous if/else on the boolean
    // result was redundant.
    return c.isCapabilityEnabled(capability, organizationID)
}
// isCapabilityEnabled queries the organization (with capabilities fetched)
// and reports whether the named capability is present with the value "true".
func (c *Client) isCapabilityEnabled(capabilityName string, orgID string) (bool, error) {
    capabilityResponse, err := c.ocm.AccountsMgmt().V1().Organizations().
        Organization(orgID).Get().Parameter("fetchCapabilities", true).Send()
    if err != nil {
        return false, handleErr(capabilityResponse.Error(), err)
    }
    // Ranging over an empty slice is a no-op, so no length guard is needed.
    for _, capability := range capabilityResponse.Body().Capabilities() {
        if capability.Name() == capabilityName {
            return capability.Value() == "true", nil
        }
    }
    return false, nil
}
func (c *Client) UnlinkUserRoleFromAccount(accountID string, roleARN string) error {
linkedRoles, err := c.GetAccountLinkedUserRoles(accountID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(USERRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role ARN '%s' is not linked with the current account '%s'", roleARN, accountID)
}
// LinkAccountRole links the given user-role ARN to the account by storing it
// in the account's "sts_user_role" label. An ARN that is already linked is
// left as-is; additional ARNs are appended comma-separated.
func (c *Client) LinkAccountRole(accountID string, roleARN string) error {
    // Use the shared label-name constant (and named HTTP status codes)
    // instead of literals, for consistency with the other label helpers.
    resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
        Labels().Labels(USERRoleLabel).Get().Send()
    if err != nil && resp.Status() != http.StatusNotFound {
        if resp.Status() == http.StatusForbidden {
            return errors.Forbidden.UserErrorf("%v", err)
        }
        return handleErr(resp.Error(), err)
    }
    existingARN := resp.Body().Value()
    if existingARN != "" {
        // Nothing to do when the ARN is already linked.
        for _, value := range strings.Split(existingARN, ",") {
            if value == roleARN {
                return nil
            }
        }
        roleARN = existingARN + "," + roleARN
    }
    labelBuilder, err := amsv1.NewLabel().Key(USERRoleLabel).Value(roleARN).Build()
    if err != nil {
        return err
    }
    _, err = c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
        Labels().Add().Body(labelBuilder).Send()
    if err != nil {
        return handleErr(resp.Error(), err)
    }
    // err is known to be nil here; return nil explicitly.
    return nil
}
// UnlinkOCMRoleFromOrg removes roleARN from the organization's OCM-role
// label, deleting the label when it was the last linked ARN. Returns a user
// error when the ARN is not currently linked.
func (c *Client) UnlinkOCMRoleFromOrg(orgID string, roleARN string) error {
	linkedRoles, err := c.GetOrganizationLinkedOCMRoles(orgID)
	if err != nil {
		return err
	}
	if helper.Contains(linkedRoles, roleARN) {
		linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
		if len(linkedRoles) > 0 {
			// Other ARNs remain: rewrite the label with the survivors.
			newRoleARN := strings.Join(linkedRoles, ",")
			label, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(newRoleARN).Build()
			if err != nil {
				return err
			}
			resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
				Labels(OCMRoleLabel).Update().Body(label).Send()
			if err != nil {
				return handleErr(resp.Error(), err)
			}
		} else {
			// Last ARN removed: delete the label entirely.
			resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
				Labels(OCMRoleLabel).Delete().Send()
			if err != nil {
				return handleErr(resp.Error(), err)
			}
		}
		return nil
	}
	return errors.UserErrorf("Role-arn '%s' is not linked with the organization account '%s'", roleARN, orgID)
}
// LinkOrgToRole appends roleARN to the organization's OCM-role label,
// enforcing at most one role per AWS account per organization.
// Returns true when a new link was created, false when it already existed.
func (c *Client) LinkOrgToRole(orgID string, roleARN string) (bool, error) {
	parsedARN, err := arn.Parse(roleARN)
	if err != nil {
		return false, err
	}
	exists, existingARN, selectedARN, err := c.CheckIfAWSAccountExists(orgID, parsedARN.AccountID)
	if err != nil {
		return false, err
	}
	if exists {
		if selectedARN != roleARN {
			// A different role is already linked for this AWS account.
			return false, errors.UserErrorf("User organization '%s' has role-arn '%s' associated. "+
				"Only one role can be linked per AWS account per organization", orgID, selectedARN)
		}
		// The exact same ARN is already linked; nothing to do.
		return false, nil
	}
	if existingARN != "" {
		// Preserve links for other AWS accounts by appending to the list.
		roleARN = existingARN + "," + roleARN
	}
	labelBuilder, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(roleARN).Build()
	if err != nil {
		return false, err
	}
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Add().Body(labelBuilder).Send()
	if err != nil {
		return false, handleErr(resp.Error(), err)
	}
	return true, nil
}
// GetAccountLinkedUserRoles returns the user-role ARNs stored on the
// account's user-role label, split on commas. A missing label (404) is not
// an error; note that an empty label value yields a single empty-string
// element, not an empty slice.
func (c *Client) GetAccountLinkedUserRoles(accountID string) ([]string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
		Labels().Labels(USERRoleLabel).Get().Send()
	if err != nil && resp.Status() != http.StatusNotFound {
		return nil, handleErr(resp.Error(), err)
	}
	return strings.Split(resp.Body().Value(), ","), nil
}
// GetOrganizationLinkedOCMRoles returns the OCM role ARNs stored on the
// organization's OCM-role label, split on commas. A missing label (404) is
// not an error.
func (c *Client) GetOrganizationLinkedOCMRoles(orgID string) ([]string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Labels(OCMRoleLabel).Get().Send()
	if err != nil && resp.Status() != http.StatusNotFound {
		// Surface the OCM error body via handleErr, matching the sibling
		// GetAccountLinkedUserRoles instead of returning the bare transport err.
		return nil, handleErr(resp.Error(), err)
	}
	return strings.Split(resp.Body().Value(), ","), nil
}
// CheckIfAWSAccountExists reports whether the organization already has an
// OCM role linked for awsAccountID. Returns (exists, fullLabelValue,
// matchingARN, err); a 403 becomes a user-facing Forbidden error.
func (c *Client) CheckIfAWSAccountExists(orgID string, awsAccountID string) (bool, string, string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Labels(OCMRoleLabel).Get().Send()
	// A missing label (404) simply means no roles are linked yet. Use the
	// named status constants for consistency with the other label helpers.
	if err != nil && resp.Status() != http.StatusNotFound {
		if resp.Status() == http.StatusForbidden {
			return false, "", "", errors.Forbidden.UserErrorf("%v", err)
		}
		return false, "", "", handleErr(resp.Error(), err)
	}
	existingARN := resp.Body().Value()
	exists := false
	selectedARN := ""
	if existingARN != "" {
		// Scan the comma-separated ARN list for one belonging to awsAccountID.
		for _, value := range strings.Split(existingARN, ",") {
			parsedARN, err := arn.Parse(value)
			if err != nil {
				return false, "", "", err
			}
			if parsedARN.AccountID == awsAccountID {
				exists = true
				selectedARN = value
				break
			}
		}
	}
	return exists, existingARN, selectedARN, nil
}
/*
We should allow only one OCM role per AWS account per organization.
If the user requests the same OCM role, we let them proceed so they can add the
admin role if it does not exist, or attach policies, link, etc.
If the user requests a different OCM role name, we error out.
*/
// CheckRoleExists reports whether the organization already has an OCM role
// linked for awsAccountID under a *different* role name.
// Returns (true, existingRoleName, existingARN, nil) on such a conflict, and
// (false, "", "", nil) when no role is linked or the same name is linked.
func (c *Client) CheckRoleExists(orgID string, roleName string, awsAccountID string) (bool, string, string, error) {
	exists, _, selectedARN, err := c.CheckIfAWSAccountExists(orgID, awsAccountID)
	if err != nil {
		return false, "", "", err
	}
	if !exists {
		return false, "", "", nil
	}
	// An IAM role ARN has the form arn:aws:iam::<account>:role/<name>.
	// Guard against a malformed ARN with no '/' so we never index out of
	// range (the original panicked on existingRole[1] in that case).
	existingRole := strings.SplitN(selectedARN, "/", 2)
	if len(existingRole) < 2 || existingRole[1] == roleName {
		return false, "", "", nil
	}
	return true, existingRole[1], selectedARN, nil
}
// GetVersionMinor returns the "major.minor" portion of an OpenShift version
// string, accepting both "openshift-v4.10.1" and plain "4.10.1" forms.
func GetVersionMinor(ver string) string {
	rawID := strings.Replace(ver, "openshift-v", "", 1)
	version, err := semver.NewVersion(rawID)
	if err != nil {
		// Fall back to naive splitting when the string is not valid semver;
		// guard against inputs with no '.' to avoid an index-out-of-range panic.
		segments := strings.Split(rawID, ".")
		if len(segments) < 2 {
			return rawID
		}
		return fmt.Sprintf("%s.%s", segments[0], segments[1])
	}
	segments := version.Segments()
	return fmt.Sprintf("%d.%d", segments[0], segments[1])
}
// CheckSupportedVersion reports whether clusterVersion is at least
// operatorVersion; both arguments must be valid semantic versions.
func CheckSupportedVersion(clusterVersion string, operatorVersion string) (bool, error) {
	clusterVer, err := semver.NewVersion(clusterVersion)
	if err != nil {
		return false, err
	}
	operatorVer, err := semver.NewVersion(operatorVersion)
	if err != nil {
		return false, err
	}
	// The cluster must run a version no older than the operator requires.
	return clusterVer.GreaterThanOrEqual(operatorVer), nil
}
// GetPolicies returns the AWS STS policies known to OCM, keyed by policy ID,
// optionally filtered by policy type (an empty policyType returns all).
func (c *Client) GetPolicies(policyType string) (map[string]*cmv1.AWSSTSPolicy, error) {
	query := fmt.Sprintf("policy_type = '%s'", policyType)
	m := make(map[string]*cmv1.AWSSTSPolicy)
	stmt := c.ocm.ClustersMgmt().V1().AWSInquiries().STSPolicies().List()
	if policyType != "" {
		stmt = stmt.Search(query)
	}
	accountRolePoliciesResponse, err := stmt.Send()
	if err != nil {
		return m, handleErr(accountRolePoliciesResponse.Error(), err)
	}
	accountRolePoliciesResponse.Items().Each(func(awsPolicy *cmv1.AWSSTSPolicy) bool {
		m[awsPolicy.ID()] = awsPolicy
		return true
	})
	return m, nil
}
// GetAllCredRequests merges the credential requests for both the classic and
// hosted-control-plane (hypershift) topologies into one map; hypershift
// entries overwrite classic entries that share a name. The actual values
// might differ from classic to hcp, so prefer GetCredRequests(isHypershift)
// when the topology is known in advance.
func (c *Client) GetAllCredRequests() (map[string]*cmv1.STSOperator, error) {
	merged := make(map[string]*cmv1.STSOperator)
	// Classic first, hypershift second, so hcp values win on name clashes.
	for _, hypershift := range []bool{false, true} {
		credRequests, err := c.GetCredRequests(hypershift)
		if err != nil {
			return merged, err
		}
		for name, operator := range credRequests {
			merged[name] = operator
		}
	}
	return merged, nil
}
// GetCredRequests returns the STS credential requests for the given topology
// (hypershift or classic), keyed by credential request name.
func (c *Client) GetCredRequests(isHypershift bool) (map[string]*cmv1.STSOperator, error) {
	m := make(map[string]*cmv1.STSOperator)
	stsCredentialResponse, err := c.ocm.ClustersMgmt().
		V1().
		AWSInquiries().
		STSCredentialRequests().
		List().
		Parameter("is_hypershift", isHypershift).
		Send()
	if err != nil {
		return m, handleErr(stsCredentialResponse.Error(), err)
	}
	stsCredentialResponse.Items().Each(func(stsCredentialRequest *cmv1.STSCredentialRequest) bool {
		m[stsCredentialRequest.Name()] = stsCredentialRequest.Operator()
		return true
	})
	return m, nil
}
// FindMissingOperatorRolesForUpgrade returns the operator credential
// requests whose minimum version is satisfied by newMinorVersion but whose
// operator IAM role is not yet present on the cluster.
func (c *Client) FindMissingOperatorRolesForUpgrade(cluster *cmv1.Cluster,
	newMinorVersion string) (map[string]*cmv1.STSOperator, error) {
	missingRoles := make(map[string]*cmv1.STSOperator)
	credRequests, err := c.GetCredRequests(cluster.Hypershift().Enabled())
	if err != nil {
		return nil, errors.Errorf("Error getting operator credential request from OCM %s", err)
	}
	// The upgrade version does not change per credential request; parse it
	// once instead of on every loop iteration.
	clusterUpgradeVersion, err := semver.NewVersion(newMinorVersion)
	if err != nil {
		return nil, err
	}
	for credRequest, operator := range credRequests {
		// Operators without a minimum version never gate an upgrade.
		if operator.MinVersion() == "" {
			continue
		}
		operatorMinVersion, err := semver.NewVersion(operator.MinVersion())
		if err != nil {
			return nil, err
		}
		if clusterUpgradeVersion.GreaterThanOrEqual(operatorMinVersion) &&
			!isOperatorRoleAlreadyExist(cluster, operator) {
			missingRoles[credRequest] = operator
		}
	}
	return missingRoles, nil
}
// createCloudProviderDataBuilder builds the cloud-provider payload used for
// AWS inquiries: STS credentials (role ARN plus optional external ID) when
// roleARN is set, otherwise static access keys fetched from the AWS client.
func (c *Client) createCloudProviderDataBuilder(roleARN string, awsClient aws.Client,
	externalID string) (*cmv1.CloudProviderDataBuilder, error) {
	var awsBuilder *cmv1.AWSBuilder
	if roleARN != "" {
		stsBuilder := cmv1.NewSTS().RoleARN(roleARN)
		if externalID != "" {
			stsBuilder = stsBuilder.ExternalID(externalID)
		}
		awsBuilder = cmv1.NewAWS().STS(stsBuilder)
	} else {
		accessKeys, err := awsClient.GetAWSAccessKeys()
		if err != nil {
			return &cmv1.CloudProviderDataBuilder{}, err
		}
		awsBuilder = cmv1.NewAWS().AccessKeyID(accessKeys.AccessKeyID).SecretAccessKey(accessKeys.SecretAccessKey)
	}
	return cmv1.NewCloudProviderData().AWS(awsBuilder), nil
}
// isOperatorRoleAlreadyExist reports whether the cluster already references
// an operator IAM role matching the operator's namespace and name.
// FIXME: Check it does not exist on AWS itself too;
// the iam roles will only return up to the version of the cluster.
func isOperatorRoleAlreadyExist(cluster *cmv1.Cluster, operator *cmv1.STSOperator) bool {
	for _, existing := range cluster.AWS().STS().OperatorIAMRoles() {
		sameNamespace := existing.Namespace() == operator.Namespace()
		sameName := existing.Name() == operator.Name()
		if sameNamespace && sameName {
			return true
		}
	}
	return false
}
const (
BYOVPCSingleAZSubnetsCount = 2
BYOVPCMultiAZSubnetsCount = 6
privateLinkSingleAZSubnetsCount = 1
privateLinkMultiAZSubnetsCount = 3
)
// ValidateSubnetsCount checks that subnetsInputCount matches the exact
// number of subnets required by the cluster's AZ layout and private-link
// setting; returns nil when the count is correct.
func ValidateSubnetsCount(multiAZ bool, privateLink bool, subnetsInputCount int) error {
	switch {
	case privateLink && multiAZ:
		if subnetsInputCount != privateLinkMultiAZSubnetsCount {
			return fmt.Errorf("The number of subnets for a multi-AZ private link cluster should be %d, "+
				"instead received: %d", privateLinkMultiAZSubnetsCount, subnetsInputCount)
		}
	case privateLink:
		if subnetsInputCount != privateLinkSingleAZSubnetsCount {
			return fmt.Errorf("The number of subnets for a single AZ private link cluster should be %d, "+
				"instead received: %d", privateLinkSingleAZSubnetsCount, subnetsInputCount)
		}
	case multiAZ:
		if subnetsInputCount != BYOVPCMultiAZSubnetsCount {
			return fmt.Errorf("The number of subnets for a multi-AZ cluster should be %d, "+
				"instead received: %d", BYOVPCMultiAZSubnetsCount, subnetsInputCount)
		}
	default:
		if subnetsInputCount != BYOVPCSingleAZSubnetsCount {
			return fmt.Errorf("The number of subnets for a single AZ cluster should be %d, "+
				"instead received: %d", BYOVPCSingleAZSubnetsCount, subnetsInputCount)
		}
	}
	return nil
}
// ValidateHostedClusterSubnets checks the subnets supplied for a hosted
// (hypershift) cluster: minimum counts, and that a private cluster has no
// public subnets while a public cluster has at least one.
// Returns the number of private subnets found.
func ValidateHostedClusterSubnets(awsClient aws.Client, isPrivate bool, subnetIDs []string) (int, error) {
	if isPrivate && len(subnetIDs) < 1 {
		return 0, fmt.Errorf("The number of subnets for a private hosted cluster should be at least one")
	}
	if !isPrivate && len(subnetIDs) < 2 {
		return 0, fmt.Errorf("The number of subnets for a public hosted cluster should be at least two")
	}
	// All supplied subnets are looked up in the VPC of the first one;
	// assumes the caller's subnets share a VPC — TODO confirm with callers.
	vpcSubnets, vpcSubnetsErr := awsClient.GetVPCSubnets(subnetIDs[0])
	if vpcSubnetsErr != nil {
		return 0, vpcSubnetsErr
	}
	// Keep only the VPC subnets the caller actually selected.
	var subnets []*ec2.Subnet
	for _, subnet := range vpcSubnets {
		for _, subnetId := range subnetIDs {
			if awssdk.StringValue(subnet.SubnetId) == subnetId {
				subnets = append(subnets, subnet)
				break
			}
		}
	}
	privateSubnets, privateSubnetsErr := awsClient.FilterVPCsPrivateSubnets(subnets)
	if privateSubnetsErr != nil {
		return 0, privateSubnetsErr
	}
	privateSubnetCount := len(privateSubnets)
	publicSubnetsCount := len(subnets) - privateSubnetCount
	if isPrivate {
		if publicSubnetsCount > 0 {
			return 0, fmt.Errorf("The number of public subnets for a private hosted cluster should be zero")
		}
	} else {
		if publicSubnetsCount == 0 {
			return 0, fmt.Errorf("The number of public subnets for a public hosted " +
				"cluster should be at least one")
		}
	}
	return privateSubnetCount, nil
}
const (
singleAZCount = 1
MultiAZCount = 3
)
// ValidateAvailabilityZonesCount checks that the requested availability-zone
// count matches the cluster's multi-AZ or single-AZ layout exactly.
func ValidateAvailabilityZonesCount(multiAZ bool, availabilityZonesCount int) error {
	if multiAZ {
		if availabilityZonesCount != MultiAZCount {
			return fmt.Errorf("The number of availability zones for a multi AZ cluster should be %d, "+
				"instead received: %d", MultiAZCount, availabilityZonesCount)
		}
		return nil
	}
	if availabilityZonesCount != singleAZCount {
		return fmt.Errorf("The number of availability zones for a single AZ cluster should be %d, "+
			"instead received: %d", singleAZCount, availabilityZonesCount)
	}
	return nil
}
// CheckUpgradeClusterVersion verifies that clusterUpgradeVersion is a valid
// upgrade target for the cluster among availableUpgrades; returns an error
// listing the valid versions otherwise.
func (c *Client) CheckUpgradeClusterVersion(
	availableUpgrades []string,
	clusterUpgradeVersion string,
	cluster *cmv1.Cluster,
) (err error) {
	clusterVersion := cluster.OpenshiftVersion()
	if clusterVersion == "" {
		// Fall back to the raw version ID when OpenshiftVersion is unset.
		clusterVersion = cluster.Version().RawID()
	}
	validVersion := false
	for _, v := range availableUpgrades {
		isValidVersion, err := IsValidVersion(clusterUpgradeVersion, v, clusterVersion)
		if err != nil {
			return err
		}
		if isValidVersion {
			validVersion = true
			break
		}
	}
	if !validVersion {
		return errors.Errorf(
			"Expected a valid version to upgrade cluster to.\nValid versions: %s",
			helper.SliceToSortedString(availableUpgrades),
		)
	}
	return nil
}
// GetPolicyVersion validates userRequestedVersion against the policy
// versions available in channelGroup. An empty request resolves to the
// newest available version.
func (c *Client) GetPolicyVersion(userRequestedVersion string, channelGroup string) (string, error) {
	versionList, err := c.GetVersionsList(channelGroup)
	if err != nil {
		// Propagate the error directly; the previous fmt.Errorf("%v", err)
		// rewrap produced an identical message while discarding the chain.
		return userRequestedVersion, err
	}
	if userRequestedVersion == "" {
		// GetVersionsList guarantees a non-empty list on success.
		return versionList[0], nil
	}
	// Use the shared membership helper instead of an open-coded scan.
	if helper.Contains(versionList, userRequestedVersion) {
		return userRequestedVersion, nil
	}
	versionSet := helper.SliceToMap(versionList)
	return userRequestedVersion, errors.Errorf(
		"A valid policy version number must be specified\nValid versions: %v",
		helper.MapKeysToString(versionSet),
	)
}
// ParseVersion reduces a semantic version string to its "major.minor" form.
func ParseVersion(version string) (string, error) {
	parsed, err := semver.NewVersion(version)
	if err != nil {
		return "", err
	}
	segments := parsed.Segments64()
	return fmt.Sprintf("%d.%d", segments[0], segments[1]), nil
}
// GetVersionsList returns the "major.minor" versions available in
// channelGroup that have STS support; errors when none are found.
func (c *Client) GetVersionsList(channelGroup string) ([]string, error) {
	response, err := c.GetVersions(channelGroup)
	if err != nil {
		err := fmt.Errorf("error getting versions: %s", err)
		return make([]string, 0), err
	}
	versionList := make([]string, 0)
	for _, v := range response {
		// Skip versions without STS support.
		if !HasSTSSupport(v.RawID(), v.ChannelGroup()) {
			continue
		}
		parsedVersion, err := ParseVersion(v.RawID())
		if err != nil {
			err = fmt.Errorf("error parsing version")
			return versionList, err
		}
		versionList = append(versionList, parsedVersion)
	}
	if len(versionList) == 0 {
		err = fmt.Errorf("could not find versions for the provided channel-group: '%s'", channelGroup)
		return versionList, err
	}
	return versionList, nil
}
// ValidateOperatorRolesMatchOidcProvider verifies, for a reusable OIDC
// configuration, that every operator role exists in AWS, sits on the
// expected role path, matches the computed ARN, trusts the OIDC issuer, and
// (for non-managed, non-inline policies) is compatible with clusterVersion.
func ValidateOperatorRolesMatchOidcProvider(reporter *reporter.Object, awsClient aws.Client,
	operatorIAMRoleList []OperatorIAMRole, oidcEndpointUrl string,
	clusterVersion string, expectedOperatorRolePath string) error {
	operatorIAMRoles := operatorIAMRoleList
	parsedUrl, err := url.Parse(oidcEndpointUrl)
	if err != nil {
		return err
	}
	if reporter.IsTerminal() && !output.HasFlag() {
		reporter.Infof("Reusable OIDC Configuration detected. Validating trusted relationships to operator roles: ")
	}
	for _, operatorIAMRole := range operatorIAMRoles {
		roleObject, err := awsClient.GetRoleByARN(operatorIAMRole.RoleARN)
		if err != nil {
			return err
		}
		roleARN := *roleObject.Arn
		pathFromArn, err := aws.GetPathFromARN(roleARN)
		if err != nil {
			return err
		}
		// The role path must match the installer role's path.
		if pathFromArn != expectedOperatorRolePath {
			return errors.Errorf("Operator Role '%s' does not match the path from Installer Role, "+
				"please choose correct Installer Role and try again.", roleARN)
		}
		// The ARN AWS returns must equal the one we computed.
		if roleARN != operatorIAMRole.RoleARN {
			return errors.Errorf("Computed Operator Role '%s' does not match role ARN found in AWS '%s', "+
				"please check if the correct parameters have been supplied.", operatorIAMRole.RoleARN, roleARN)
		}
		err = validateIssuerUrlMatchesAssumePolicyDocument(
			roleARN, parsedUrl, *roleObject.AssumeRolePolicyDocument)
		if err != nil {
			return err
		}
		hasManagedPolicies, err := awsClient.HasManagedPolicies(roleARN)
		if err != nil {
			return err
		}
		if hasManagedPolicies {
			// Managed policies should be compatible with all versions
			continue
		}
		policiesDetails, err := awsClient.GetAttachedPolicy(roleObject.RoleName)
		if err != nil {
			return err
		}
		for _, policyDetails := range policiesDetails {
			// Inline policies are not version-checked.
			if policyDetails.PolicType == aws.Inline {
				continue
			}
			isCompatible, err := awsClient.IsPolicyCompatible(policyDetails.PolicyArn, clusterVersion)
			if err != nil {
				return err
			}
			if !isCompatible {
				return errors.Errorf("Operator role '%s' is not compatible with cluster version '%s'", roleARN, clusterVersion)
			}
		}
		if reporter.IsTerminal() && !output.HasFlag() {
			reporter.Infof("Using '%s'", *roleObject.Arn)
		}
	}
	return nil
}
// validateIssuerUrlMatchesAssumePolicyDocument verifies that the role's
// assume-role policy document references the OIDC issuer host (plus path).
func validateIssuerUrlMatchesAssumePolicyDocument(
	roleArn string, parsedUrl *url.URL, assumePolicyDocument string) error {
	// Appending an empty Path is a no-op, so Host+Path covers both cases.
	issuerUrl := parsedUrl.Host + parsedUrl.Path
	decodedAssumePolicyDocument, err := url.QueryUnescape(assumePolicyDocument)
	if err != nil {
		return err
	}
	if strings.Contains(decodedAssumePolicyDocument, issuerUrl) {
		return nil
	}
	return errors.Errorf("Operator role '%s' does not have trusted relationship to '%s' issuer URL",
		roleArn, issuerUrl)
}
| {
event, err := cmv1.NewEvent().Key(key).Body(body).Build()
if err == nil {
_, _ = c.ocm.ClustersMgmt().V1().
Events().
Add().
Body(event).
Send()
}
} | identifier_body |
helpers.go | /**
Copyright (c) 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ocm
import (
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"os"
"regexp"
"strings"
awssdk "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/ec2"
semver "github.com/hashicorp/go-version"
"github.com/openshift/rosa/pkg/aws"
"github.com/openshift/rosa/pkg/output"
"github.com/openshift/rosa/pkg/reporter"
errors "github.com/zgalor/weberr"
amsv1 "github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1"
cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
ocmerrors "github.com/openshift-online/ocm-sdk-go/errors"
"github.com/openshift/rosa/pkg/helper"
)
const (
ANY = "any"
HibernateCapability = "capability.organization.hibernate_cluster"
//Pendo Events
Success = "Success"
Failure = "Failure"
Response = "Response"
ClusterID = "ClusterID"
OperatorRolesPrefix = "OperatorRolePrefix"
Version = "Version"
Username = "Username"
URL = "URL"
IsThrottle = "IsThrottle"
OCMRoleLabel = "sts_ocm_role"
USERRoleLabel = "sts_user_role"
maxClusterNameLength = 15
)
// Regular expression used to make sure that the identifier or name given by the user is
// safe and that there is no risk of SQL injection:
var clusterKeyRE = regexp.MustCompile(`^(\w|-)+$`)
// Cluster names must be valid DNS-1035 labels, so they must consist of lower case alphanumeric
// characters or '-', start with an alphabetic character, and end with an alphanumeric character
var clusterNameRE = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,13}[a-z0-9])?$`)
var badUsernameRE = regexp.MustCompile(`^(~|\.?\.|.*[:\/%].*)$`)
// IsValidClusterKey reports whether clusterKey matches the safe-identifier
// pattern (word characters and '-' only).
func IsValidClusterKey(clusterKey string) bool {
	return clusterKeyRE.MatchString(clusterKey)
}
// IsValidClusterName reports whether clusterName is a valid DNS-1035 label
// of at most 15 characters (see clusterNameRE).
func IsValidClusterName(clusterName string) bool {
	return clusterNameRE.MatchString(clusterName)
}
// ClusterNameValidator validates that name is a string holding an acceptable
// cluster name (after trimming spaces/tabs); any non-string input is rejected.
func ClusterNameValidator(name interface{}) error {
	str, ok := name.(string)
	if !ok {
		return fmt.Errorf("can only validate strings, got %v", name)
	}
	if IsValidClusterName(strings.Trim(str, " \t")) {
		return nil
	}
	return fmt.Errorf("Cluster name must consist of no more than 15 lowercase " +
		"alphanumeric characters or '-', start with a letter, and end with an " +
		"alphanumeric character.")
}
// ValidateHTTPProxy validates that val is an http:// URL or the empty
// string; any non-string input is rejected.
func ValidateHTTPProxy(val interface{}) error {
	httpProxy, ok := val.(string)
	if !ok {
		return fmt.Errorf("can only validate strings, got %v", val)
	}
	if httpProxy == "" {
		return nil
	}
	parsed, err := url.ParseRequestURI(httpProxy)
	if err != nil {
		return fmt.Errorf("Invalid http-proxy value '%s'", httpProxy)
	}
	// Only plain http is accepted for the proxy endpoint.
	if parsed.Scheme != "http" {
		return errors.Errorf("%s", "Expected http-proxy to have an http:// scheme")
	}
	return nil
}
// ValidateAdditionalTrustBundle validates that val names a readable,
// non-empty PEM certificate bundle file; an empty string is accepted.
func ValidateAdditionalTrustBundle(val interface{}) error {
	if additionalTrustBundleFile, ok := val.(string); ok {
		if additionalTrustBundleFile == "" {
			return nil
		}
		cert, err := os.ReadFile(additionalTrustBundleFile)
		if err != nil {
			return err
		}
		additionalTrustBundle := string(cert)
		if additionalTrustBundle == "" {
			return errors.Errorf("%s", "Trust bundle file is empty")
		}
		additionalTrustBundleBytes := []byte(additionalTrustBundle)
		// AppendCertsFromPEM returns false when no certificate could be parsed.
		if !x509.NewCertPool().AppendCertsFromPEM(additionalTrustBundleBytes) {
			return errors.Errorf("%s", "Failed to parse additional trust bundle")
		}
		return nil
	}
	return fmt.Errorf("can only validate strings, got %v", val)
}
// IsValidUsername reports whether username avoids the reserved forms ('~',
// '.', '..') and the characters ':', '/' and '%' (see badUsernameRE).
func IsValidUsername(username string) bool {
	return !badUsernameRE.MatchString(username)
}
// IsEmptyCIDR reports whether cidr is the zero net.IPNet, whose String()
// renders as "<nil>" when IP and mask are unset.
func IsEmptyCIDR(cidr net.IPNet) bool {
	return cidr.String() == "<nil>"
}
// isCompatible determines whether a related resource is compatible with ROSA
// clusters in general: product rosa/moa, cloud provider aws, BYOC — with
// "any" acting as a wildcard for each field.
func isCompatible(relatedResource *amsv1.RelatedResource) bool {
	product := strings.ToLower(relatedResource.Product())
	cloudProvider := strings.ToLower(relatedResource.CloudProvider())
	byoc := strings.ToLower(relatedResource.BYOC())
	// nolint:goconst
	return (product == ANY || product == "rosa" || product == "moa") &&
		(cloudProvider == ANY || cloudProvider == "aws") &&
		(byoc == ANY || byoc == "byoc")
}
// handleErr converts an OCM error response into a typed weberr error,
// preferring the response's reason text over the transport error message.
func handleErr(res *ocmerrors.Error, err error) error {
	msg := res.Reason()
	if msg == "" {
		msg = err.Error()
	}
	// Hack to always display the correct terms and conditions message
	if res.Code() == "CLUSTERS-MGMT-451" {
		msg = "You must accept the Terms and Conditions in order to continue.\n" +
			"Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" +
			"Once you accept the terms, you will need to retry the action that was blocked."
	}
	// Map the HTTP status onto the corresponding weberr error type.
	errType := errors.ErrorType(res.Status())
	return errType.Set(errors.Errorf("%s", msg))
}
// GetDefaultClusterFlavors returns the default network CIDRs, host prefix
// and compute instance type for the given flavour, falling back to "osd-4"
// when the flavour cannot be fetched. Unparseable CIDRs come back as nil.
func (c *Client) GetDefaultClusterFlavors(flavour string) (dMachinecidr *net.IPNet, dPodcidr *net.IPNet,
	dServicecidr *net.IPNet, dhostPrefix int, computeInstanceType string) {
	flavourGetResponse, err := c.ocm.ClustersMgmt().V1().Flavours().Flavour(flavour).Get().Send()
	if err != nil {
		// Best-effort fallback; a failure here leaves an empty response body.
		flavourGetResponse, _ = c.ocm.ClustersMgmt().V1().Flavours().Flavour("osd-4").Get().Send()
	}
	aws, ok := flavourGetResponse.Body().GetAWS()
	if !ok {
		return nil, nil, nil, 0, ""
	}
	computeInstanceType = aws.ComputeInstanceType()
	network, ok := flavourGetResponse.Body().GetNetwork()
	if !ok {
		return nil, nil, nil, 0, computeInstanceType
	}
	_, dMachinecidr, err = net.ParseCIDR(network.MachineCIDR())
	if err != nil {
		dMachinecidr = nil
	}
	_, dPodcidr, err = net.ParseCIDR(network.PodCIDR())
	if err != nil {
		dPodcidr = nil
	}
	_, dServicecidr, err = net.ParseCIDR(network.ServiceCIDR())
	if err != nil {
		dServicecidr = nil
	}
	dhostPrefix, _ = network.GetHostPrefix()
	return dMachinecidr, dPodcidr, dServicecidr, dhostPrefix, computeInstanceType
}
// LogEvent sends a best-effort telemetry event to OCM; build and send
// failures are deliberately ignored.
func (c *Client) LogEvent(key string, body map[string]string) {
	event, err := cmv1.NewEvent().Key(key).Body(body).Build()
	if err == nil {
		_, _ = c.ocm.ClustersMgmt().V1().
			Events().
			Add().
			Body(event).
			Send()
	}
}
// GetCurrentAccount returns the account for the current credentials, or
// (nil, nil) when the account does not exist (404).
func (c *Client) GetCurrentAccount() (*amsv1.Account, error) {
	response, err := c.ocm.AccountsMgmt().V1().
		CurrentAccount().
		Get().
		Send()
	if err != nil {
		if response.Status() == http.StatusNotFound {
			return nil, nil
		}
		return nil, handleErr(response.Error(), err)
	}
	return response.Body(), nil
}
// GetCurrentOrganization returns the internal and external IDs of the
// organization that owns the current account.
func (c *Client) GetCurrentOrganization() (id string, externalID string, err error) {
	account, err := c.GetCurrentAccount()
	if err != nil {
		return id, externalID, err
	}
	organization := account.Organization()
	return organization.ID(), organization.ExternalID(), nil
}
// IsCapabilityEnabled reports whether the named capability is enabled for
// the caller's current organization.
func (c *Client) IsCapabilityEnabled(capability string) (enabled bool, err error) {
	organizationID, _, err := c.GetCurrentOrganization()
	if err != nil {
		return
	}
	// Return the lookup result directly; the previous
	// `if !x { return false, nil } return true, nil` branching was redundant.
	return c.isCapabilityEnabled(capability, organizationID)
}
// isCapabilityEnabled fetches the organization with its capabilities and
// reports whether capabilityName is present with the value "true".
func (c *Client) isCapabilityEnabled(capabilityName string, orgID string) (bool, error) {
	capabilityResponse, err := c.ocm.AccountsMgmt().V1().Organizations().
		Organization(orgID).Get().Parameter("fetchCapabilities", true).Send()
	if err != nil {
		return false, handleErr(capabilityResponse.Error(), err)
	}
	if len(capabilityResponse.Body().Capabilities()) > 0 {
		for _, capability := range capabilityResponse.Body().Capabilities() {
			if capability.Name() == capabilityName {
				// Capability values are stringly-typed booleans.
				return capability.Value() == "true", nil
			}
		}
	}
	return false, nil
}
// UnlinkUserRoleFromAccount removes roleARN from the account's user-role
// label, deleting the label when it was the last linked ARN. Returns a user
// error when the ARN is not currently linked.
func (c *Client) UnlinkUserRoleFromAccount(accountID string, roleARN string) error {
	linkedRoles, err := c.GetAccountLinkedUserRoles(accountID)
	if err != nil {
		return err
	}
	if helper.Contains(linkedRoles, roleARN) {
		linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
		if len(linkedRoles) > 0 {
			// Other ARNs remain: rewrite the label with the survivors.
			newRoleARN := strings.Join(linkedRoles, ",")
			label, err := amsv1.NewLabel().Key(USERRoleLabel).Value(newRoleARN).Build()
			if err != nil {
				return err
			}
			resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
				Labels(USERRoleLabel).Update().Body(label).Send()
			if err != nil {
				return handleErr(resp.Error(), err)
			}
		} else {
			// Last ARN removed: delete the label entirely.
			resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
				Labels(USERRoleLabel).Delete().Send()
			if err != nil {
				return handleErr(resp.Error(), err)
			}
		}
		return nil
	}
	return errors.UserErrorf("Role ARN '%s' is not linked with the current account '%s'", roleARN, accountID)
}
// LinkAccountRole links roleARN to the account's user-role label, appending
// it to any ARNs already stored there. It is a no-op when the ARN is already
// linked. A 403 from OCM becomes a user-facing Forbidden error.
func (c *Client) LinkAccountRole(accountID string, roleARN string) error {
	resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
		Labels().Labels(USERRoleLabel).Get().Send()
	// 404 means no label yet, which is fine; use the named status constants
	// for consistency with the other label helpers in this file.
	if err != nil && resp.Status() != http.StatusNotFound {
		if resp.Status() == http.StatusForbidden {
			return errors.Forbidden.UserErrorf("%v", err)
		}
		return handleErr(resp.Error(), err)
	}
	existingARN := resp.Body().Value()
	if existingARN != "" {
		// Nothing to do when the ARN is already part of the label value.
		for _, value := range strings.Split(existingARN, ",") {
			if value == roleARN {
				return nil
			}
		}
		roleARN = existingARN + "," + roleARN
	}
	labelBuilder, err := amsv1.NewLabel().Key(USERRoleLabel).Value(roleARN).Build()
	if err != nil {
		return err
	}
	// Capture the Add() response so a failure reports its own error body
	// instead of the stale Get() response, and return nil explicitly on
	// success rather than a known-nil err.
	addResp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
		Labels().Add().Body(labelBuilder).Send()
	if err != nil {
		return handleErr(addResp.Error(), err)
	}
	return nil
}
// UnlinkOCMRoleFromOrg removes roleARN from the organization's OCM-role
// label, deleting the label when it was the last linked ARN. Returns a user
// error when the ARN is not currently linked.
func (c *Client) UnlinkOCMRoleFromOrg(orgID string, roleARN string) error {
	linkedRoles, err := c.GetOrganizationLinkedOCMRoles(orgID)
	if err != nil {
		return err
	}
	if helper.Contains(linkedRoles, roleARN) {
		linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
		if len(linkedRoles) > 0 {
			// Other ARNs remain: rewrite the label with the survivors.
			newRoleARN := strings.Join(linkedRoles, ",")
			label, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(newRoleARN).Build()
			if err != nil {
				return err
			}
			resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
				Labels(OCMRoleLabel).Update().Body(label).Send()
			if err != nil {
				return handleErr(resp.Error(), err)
			}
		} else {
			// Last ARN removed: delete the label entirely.
			resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
				Labels(OCMRoleLabel).Delete().Send()
			if err != nil {
				return handleErr(resp.Error(), err)
			}
		}
		return nil
	}
	return errors.UserErrorf("Role-arn '%s' is not linked with the organization account '%s'", roleARN, orgID)
}
// LinkOrgToRole appends roleARN to the organization's OCM-role label,
// enforcing at most one role per AWS account per organization.
// Returns true when a new link was created, false when it already existed.
func (c *Client) LinkOrgToRole(orgID string, roleARN string) (bool, error) {
	parsedARN, err := arn.Parse(roleARN)
	if err != nil {
		return false, err
	}
	exists, existingARN, selectedARN, err := c.CheckIfAWSAccountExists(orgID, parsedARN.AccountID)
	if err != nil {
		return false, err
	}
	if exists {
		if selectedARN != roleARN {
			// A different role is already linked for this AWS account.
			return false, errors.UserErrorf("User organization '%s' has role-arn '%s' associated. "+
				"Only one role can be linked per AWS account per organization", orgID, selectedARN)
		}
		// The exact same ARN is already linked; nothing to do.
		return false, nil
	}
	if existingARN != "" {
		// Preserve links for other AWS accounts by appending to the list.
		roleARN = existingARN + "," + roleARN
	}
	labelBuilder, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(roleARN).Build()
	if err != nil {
		return false, err
	}
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Add().Body(labelBuilder).Send()
	if err != nil {
		return false, handleErr(resp.Error(), err)
	}
	return true, nil
}
// GetAccountLinkedUserRoles returns the user-role ARNs stored on the
// account's user-role label, split on commas. A missing label (404) is not
// an error; note that an empty label value yields a single empty-string
// element, not an empty slice.
func (c *Client) GetAccountLinkedUserRoles(accountID string) ([]string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
		Labels().Labels(USERRoleLabel).Get().Send()
	if err != nil && resp.Status() != http.StatusNotFound {
		return nil, handleErr(resp.Error(), err)
	}
	return strings.Split(resp.Body().Value(), ","), nil
}
// GetOrganizationLinkedOCMRoles returns the OCM role ARNs stored on the
// organization's OCM-role label, split on commas. A missing label (404) is
// not an error.
func (c *Client) GetOrganizationLinkedOCMRoles(orgID string) ([]string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Labels(OCMRoleLabel).Get().Send()
	if err != nil && resp.Status() != http.StatusNotFound {
		// Surface the OCM error body via handleErr, matching the sibling
		// GetAccountLinkedUserRoles instead of returning the bare transport err.
		return nil, handleErr(resp.Error(), err)
	}
	return strings.Split(resp.Body().Value(), ","), nil
}
// CheckIfAWSAccountExists reports whether the organization already has an
// OCM role linked for awsAccountID. Returns (exists, fullLabelValue,
// matchingARN, err); a 403 becomes a user-facing Forbidden error.
func (c *Client) CheckIfAWSAccountExists(orgID string, awsAccountID string) (bool, string, string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Labels(OCMRoleLabel).Get().Send()
	// A missing label (404) simply means no roles are linked yet. Use the
	// named status constants for consistency with the other label helpers.
	if err != nil && resp.Status() != http.StatusNotFound {
		if resp.Status() == http.StatusForbidden {
			return false, "", "", errors.Forbidden.UserErrorf("%v", err)
		}
		return false, "", "", handleErr(resp.Error(), err)
	}
	existingARN := resp.Body().Value()
	exists := false
	selectedARN := ""
	if existingARN != "" {
		// Scan the comma-separated ARN list for one belonging to awsAccountID.
		for _, value := range strings.Split(existingARN, ",") {
			parsedARN, err := arn.Parse(value)
			if err != nil {
				return false, "", "", err
			}
			if parsedARN.AccountID == awsAccountID {
				exists = true
				selectedARN = value
				break
			}
		}
	}
	return exists, existingARN, selectedARN, nil
}
/*
We should allow only one OCM role per AWS account per organization.
If the user requests the same OCM role, we let them proceed so they can add the
admin role if it does not exist, or attach policies, link, etc.
If the user requests a different OCM role name, we error out.
*/
func (c *Client) | (orgID string, roleName string, awsAccountID string) (bool, string, string, error) {
exists, _, selectedARN, err := c.CheckIfAWSAccountExists(orgID, awsAccountID)
if err != nil {
return false, "", "", err
}
if !exists {
return false, "", "", nil
}
existingRole := strings.SplitN(selectedARN, "/", 2)
if len(existingRole) > 1 && existingRole[1] == roleName {
return false, "", "", nil
}
return true, existingRole[1], selectedARN, nil
}
// GetVersionMinor returns the "major.minor" portion of an OpenShift version
// string, accepting both "openshift-v4.10.1" and plain "4.10.1" forms.
func GetVersionMinor(ver string) string {
	rawID := strings.Replace(ver, "openshift-v", "", 1)
	version, err := semver.NewVersion(rawID)
	if err != nil {
		// Fall back to naive splitting when the string is not valid semver;
		// guard against inputs with no '.' to avoid an index-out-of-range panic.
		segments := strings.Split(rawID, ".")
		if len(segments) < 2 {
			return rawID
		}
		return fmt.Sprintf("%s.%s", segments[0], segments[1])
	}
	segments := version.Segments()
	return fmt.Sprintf("%d.%d", segments[0], segments[1])
}
// CheckSupportedVersion reports whether clusterVersion is at least
// operatorVersion; both arguments must be valid semantic versions.
func CheckSupportedVersion(clusterVersion string, operatorVersion string) (bool, error) {
	v1, err := semver.NewVersion(clusterVersion)
	if err != nil {
		return false, err
	}
	v2, err := semver.NewVersion(operatorVersion)
	if err != nil {
		return false, err
	}
	//Cluster version is greater than or equal to operator version
	return v1.GreaterThanOrEqual(v2), nil
}
// GetPolicies returns the AWS STS policies known to OCM, keyed by policy ID,
// optionally filtered by policy type (an empty policyType returns all).
func (c *Client) GetPolicies(policyType string) (map[string]*cmv1.AWSSTSPolicy, error) {
	query := fmt.Sprintf("policy_type = '%s'", policyType)
	m := make(map[string]*cmv1.AWSSTSPolicy)
	stmt := c.ocm.ClustersMgmt().V1().AWSInquiries().STSPolicies().List()
	if policyType != "" {
		stmt = stmt.Search(query)
	}
	accountRolePoliciesResponse, err := stmt.Send()
	if err != nil {
		return m, handleErr(accountRolePoliciesResponse.Error(), err)
	}
	accountRolePoliciesResponse.Items().Each(func(awsPolicy *cmv1.AWSSTSPolicy) bool {
		m[awsPolicy.ID()] = awsPolicy
		return true
	})
	return m, nil
}
// The actual values might differ from classic to hcp
// prefer using GetCredRequests(isHypershift bool) when there is prior knowledge of the topology
// GetAllCredRequests merges the classic and hypershift credential requests
// into one map; hypershift entries overwrite classic ones sharing a name.
func (c *Client) GetAllCredRequests() (map[string]*cmv1.STSOperator, error) {
	result := make(map[string]*cmv1.STSOperator)
	classic, err := c.GetCredRequests(false)
	if err != nil {
		return result, err
	}
	hcp, err := c.GetCredRequests(true)
	if err != nil {
		return result, err
	}
	for key, value := range classic {
		result[key] = value
	}
	for key, value := range hcp {
		result[key] = value
	}
	return result, nil
}
// GetCredRequests fetches the STS credential requests known to OCM for the
// given topology (classic vs. hypershift), keyed by credential request name.
func (c *Client) GetCredRequests(isHypershift bool) (map[string]*cmv1.STSOperator, error) {
	m := make(map[string]*cmv1.STSOperator)
	stsCredentialResponse, err := c.ocm.ClustersMgmt().
		V1().
		AWSInquiries().
		STSCredentialRequests().
		List().
		Parameter("is_hypershift", isHypershift).
		Send()
	if err != nil {
		return m, handleErr(stsCredentialResponse.Error(), err)
	}
	// Index each request's operator by the credential request name.
	stsCredentialResponse.Items().Each(func(stsCredentialRequest *cmv1.STSCredentialRequest) bool {
		m[stsCredentialRequest.Name()] = stsCredentialRequest.Operator()
		return true
	})
	return m, nil
}
// FindMissingOperatorRolesForUpgrade returns the operator credential requests
// whose minimum required version is satisfied by newMinorVersion but whose
// corresponding operator IAM role is not yet present on the cluster.
func (c *Client) FindMissingOperatorRolesForUpgrade(cluster *cmv1.Cluster,
	newMinorVersion string) (map[string]*cmv1.STSOperator, error) {
	missingRoles := make(map[string]*cmv1.STSOperator)
	credRequests, err := c.GetCredRequests(cluster.Hypershift().Enabled())
	if err != nil {
		return nil, errors.Errorf("Error getting operator credential request from OCM %s", err)
	}
	// Parse the upgrade target once; it is loop-invariant (the original
	// re-parsed it for every credential request).
	clusterUpgradeVersion, err := semver.NewVersion(newMinorVersion)
	if err != nil {
		return nil, err
	}
	for credRequest, operator := range credRequests {
		// Operators with no minimum version never gate an upgrade.
		if operator.MinVersion() == "" {
			continue
		}
		operatorMinVersion, err := semver.NewVersion(operator.MinVersion())
		if err != nil {
			return nil, err
		}
		// The operator applies once the upgrade target reaches its minimum
		// version; report it only if its role is not already on the cluster.
		if clusterUpgradeVersion.GreaterThanOrEqual(operatorMinVersion) &&
			!isOperatorRoleAlreadyExist(cluster, operator) {
			missingRoles[credRequest] = operator
		}
	}
	return missingRoles, nil
}
// createCloudProviderDataBuilder assembles the cloud-provider payload for an
// AWS inquiry. With a roleARN it authenticates via STS (optionally with an
// external ID); otherwise it falls back to static access keys fetched from
// the AWS client.
func (c *Client) createCloudProviderDataBuilder(roleARN string, awsClient aws.Client,
	externalID string) (*cmv1.CloudProviderDataBuilder, error) {
	var awsBuilder *cmv1.AWSBuilder
	if roleARN != "" {
		// STS path: assume the provided role.
		stsBuilder := cmv1.NewSTS().RoleARN(roleARN)
		if externalID != "" {
			stsBuilder = stsBuilder.ExternalID(externalID)
		}
		awsBuilder = cmv1.NewAWS().STS(stsBuilder)
	} else {
		// Static-credentials path: use the account's access keys.
		accessKeys, err := awsClient.GetAWSAccessKeys()
		if err != nil {
			return &cmv1.CloudProviderDataBuilder{}, err
		}
		awsBuilder = cmv1.NewAWS().AccessKeyID(accessKeys.AccessKeyID).SecretAccessKey(accessKeys.SecretAccessKey)
	}
	return cmv1.NewCloudProviderData().AWS(awsBuilder), nil
}
// isOperatorRoleAlreadyExist reports whether the cluster already lists an
// operator IAM role matching the given operator's namespace and name.
func isOperatorRoleAlreadyExist(cluster *cmv1.Cluster, operator *cmv1.STSOperator) bool {
	//FIXME: Check it does not exist on AWS itself too
	// the iam roles will only return up to the version of the cluster
	for _, existingRole := range cluster.AWS().STS().OperatorIAMRoles() {
		sameNamespace := existingRole.Namespace() == operator.Namespace()
		sameName := existingRole.Name() == operator.Name()
		if sameNamespace && sameName {
			return true
		}
	}
	return false
}
const (
	// Expected subnet counts per topology, enforced by ValidateSubnetsCount.
	BYOVPCSingleAZSubnetsCount      = 2
	BYOVPCMultiAZSubnetsCount       = 6
	privateLinkSingleAZSubnetsCount = 1
	privateLinkMultiAZSubnetsCount  = 3
)

// ValidateSubnetsCount verifies that the number of user-supplied subnets
// matches the count required for the chosen topology (single/multi AZ,
// private-link or not) and returns a descriptive error on mismatch.
func ValidateSubnetsCount(multiAZ bool, privateLink bool, subnetsInputCount int) error {
	// Resolve the expected count and the human-readable topology label.
	var expected int
	var description string
	switch {
	case privateLink && multiAZ:
		expected, description = privateLinkMultiAZSubnetsCount, "multi-AZ private link"
	case privateLink:
		expected, description = privateLinkSingleAZSubnetsCount, "single AZ private link"
	case multiAZ:
		expected, description = BYOVPCMultiAZSubnetsCount, "multi-AZ"
	default:
		expected, description = BYOVPCSingleAZSubnetsCount, "single AZ"
	}
	if subnetsInputCount != expected {
		return fmt.Errorf("The number of subnets for a %s cluster should be %d, "+
			"instead received: %d", description, expected, subnetsInputCount)
	}
	return nil
}
// ValidateHostedClusterSubnets validates the subnets supplied for a hosted
// cluster and returns the number of private subnets among them. Private
// clusters need at least one subnet and no public subnets; public clusters
// need at least two subnets, at least one of which is public.
func ValidateHostedClusterSubnets(awsClient aws.Client, isPrivate bool, subnetIDs []string) (int, error) {
	if isPrivate && len(subnetIDs) < 1 {
		return 0, fmt.Errorf("The number of subnets for a private hosted cluster should be at least one")
	}
	if !isPrivate && len(subnetIDs) < 2 {
		return 0, fmt.Errorf("The number of subnets for a public hosted cluster should be at least two")
	}
	// The VPC is looked up from the first subnet ID only — assumes all
	// supplied subnets belong to the same VPC; TODO confirm mixed-VPC input
	// is rejected upstream.
	vpcSubnets, vpcSubnetsErr := awsClient.GetVPCSubnets(subnetIDs[0])
	if vpcSubnetsErr != nil {
		return 0, vpcSubnetsErr
	}
	// Keep only the VPC subnets that the user actually passed in.
	var subnets []*ec2.Subnet
	for _, subnet := range vpcSubnets {
		for _, subnetId := range subnetIDs {
			if awssdk.StringValue(subnet.SubnetId) == subnetId {
				subnets = append(subnets, subnet)
				break
			}
		}
	}
	privateSubnets, privateSubnetsErr := awsClient.FilterVPCsPrivateSubnets(subnets)
	if privateSubnetsErr != nil {
		return 0, privateSubnetsErr
	}
	privateSubnetCount := len(privateSubnets)
	// Everything that is not private is treated as public.
	publicSubnetsCount := len(subnets) - privateSubnetCount
	if isPrivate {
		if publicSubnetsCount > 0 {
			return 0, fmt.Errorf("The number of public subnets for a private hosted cluster should be zero")
		}
	} else {
		if publicSubnetsCount == 0 {
			return 0, fmt.Errorf("The number of public subnets for a public hosted " +
				"cluster should be at least one")
		}
	}
	return privateSubnetCount, nil
}
const (
	// Expected availability-zone counts, enforced by ValidateAvailabilityZonesCount.
	singleAZCount = 1
	MultiAZCount  = 3
)

// ValidateAvailabilityZonesCount verifies the availability-zone count matches
// the cluster topology: exactly MultiAZCount for multi-AZ clusters and
// exactly singleAZCount for single-AZ clusters.
func ValidateAvailabilityZonesCount(multiAZ bool, availabilityZonesCount int) error {
	expected := singleAZCount
	description := "single AZ"
	if multiAZ {
		expected = MultiAZCount
		description = "multi AZ"
	}
	if availabilityZonesCount != expected {
		return fmt.Errorf("The number of availability zones for a %s cluster should be %d, "+
			"instead received: %d", description, expected, availabilityZonesCount)
	}
	return nil
}
// CheckUpgradeClusterVersion validates that clusterUpgradeVersion is one of
// the available upgrades for the cluster, returning an error listing the
// valid versions when it is not.
func (c *Client) CheckUpgradeClusterVersion(
	availableUpgrades []string,
	clusterUpgradeVersion string,
	cluster *cmv1.Cluster,
) (err error) {
	// Prefer the explicit OpenShift version; fall back to the raw version id.
	currentVersion := cluster.OpenshiftVersion()
	if currentVersion == "" {
		currentVersion = cluster.Version().RawID()
	}
	for _, candidate := range availableUpgrades {
		ok, checkErr := IsValidVersion(clusterUpgradeVersion, candidate, currentVersion)
		if checkErr != nil {
			return checkErr
		}
		if ok {
			// Found a matching valid upgrade target.
			return nil
		}
	}
	return errors.Errorf(
		"Expected a valid version to upgrade cluster to.\nValid versions: %s",
		helper.SliceToSortedString(availableUpgrades),
	)
}
// GetPolicyVersion resolves the policy version to use. An empty request
// defaults to the newest available version; otherwise the requested version
// must appear in the channel group's version list.
func (c *Client) GetPolicyVersion(userRequestedVersion string, channelGroup string) (string, error) {
	versionList, err := c.GetVersionsList(channelGroup)
	if err != nil {
		return userRequestedVersion, fmt.Errorf("%v", err)
	}
	// No explicit request: use the first (newest) available version.
	if userRequestedVersion == "" {
		return versionList[0], nil
	}
	for _, candidate := range versionList {
		if candidate == userRequestedVersion {
			// The requested version is valid as-is.
			return userRequestedVersion, nil
		}
	}
	versionSet := helper.SliceToMap(versionList)
	return userRequestedVersion, errors.Errorf(
		"A valid policy version number must be specified\nValid versions: %v",
		helper.MapKeysToString(versionSet),
	)
}
// ParseVersion reduces a full semver string to its "major.minor" form,
// returning the parse error for invalid input.
func ParseVersion(version string) (string, error) {
	parsed, err := semver.NewVersion(version)
	if err != nil {
		return "", err
	}
	segments := parsed.Segments64()
	return fmt.Sprintf("%d.%d", segments[0], segments[1]), nil
}
// GetVersionsList returns the "major.minor" versions available in the given
// channel group, restricted to versions with STS support. An error is
// returned when no usable versions are found.
func (c *Client) GetVersionsList(channelGroup string) ([]string, error) {
	response, err := c.GetVersions(channelGroup)
	if err != nil {
		err := fmt.Errorf("error getting versions: %s", err)
		return make([]string, 0), err
	}
	versionList := make([]string, 0)
	for _, v := range response {
		// Versions without STS support are not eligible.
		if !HasSTSSupport(v.RawID(), v.ChannelGroup()) {
			continue
		}
		parsedVersion, err := ParseVersion(v.RawID())
		if err != nil {
			err = fmt.Errorf("error parsing version")
			return versionList, err
		}
		versionList = append(versionList, parsedVersion)
	}
	if len(versionList) == 0 {
		err = fmt.Errorf("could not find versions for the provided channel-group: '%s'", channelGroup)
		return versionList, err
	}
	return versionList, nil
}
// ValidateOperatorRolesMatchOidcProvider verifies, for each operator IAM
// role, that: the role exists in AWS, its ARN path matches the installer
// role's path, the computed ARN matches the one AWS returns, its trust
// policy references the OIDC issuer, and its attached (non-managed,
// non-inline) policies are compatible with the cluster version.
func ValidateOperatorRolesMatchOidcProvider(reporter *reporter.Object, awsClient aws.Client,
	operatorIAMRoleList []OperatorIAMRole, oidcEndpointUrl string,
	clusterVersion string, expectedOperatorRolePath string) error {
	operatorIAMRoles := operatorIAMRoleList
	parsedUrl, err := url.Parse(oidcEndpointUrl)
	if err != nil {
		return err
	}
	if reporter.IsTerminal() && !output.HasFlag() {
		reporter.Infof("Reusable OIDC Configuration detected. Validating trusted relationships to operator roles: ")
	}
	for _, operatorIAMRole := range operatorIAMRoles {
		roleObject, err := awsClient.GetRoleByARN(operatorIAMRole.RoleARN)
		if err != nil {
			return err
		}
		roleARN := *roleObject.Arn
		// The role's path must match the path derived from the installer role.
		pathFromArn, err := aws.GetPathFromARN(roleARN)
		if err != nil {
			return err
		}
		if pathFromArn != expectedOperatorRolePath {
			return errors.Errorf("Operator Role '%s' does not match the path from Installer Role, "+
				"please choose correct Installer Role and try again.", roleARN)
		}
		// The ARN computed locally must agree with what AWS reports.
		if roleARN != operatorIAMRole.RoleARN {
			return errors.Errorf("Computed Operator Role '%s' does not match role ARN found in AWS '%s', "+
				"please check if the correct parameters have been supplied.", operatorIAMRole.RoleARN, roleARN)
		}
		// The trust policy must reference the OIDC issuer URL.
		err = validateIssuerUrlMatchesAssumePolicyDocument(
			roleARN, parsedUrl, *roleObject.AssumeRolePolicyDocument)
		if err != nil {
			return err
		}
		hasManagedPolicies, err := awsClient.HasManagedPolicies(roleARN)
		if err != nil {
			return err
		}
		if hasManagedPolicies {
			// Managed policies should be compatible with all versions
			continue
		}
		policiesDetails, err := awsClient.GetAttachedPolicy(roleObject.RoleName)
		if err != nil {
			return err
		}
		for _, policyDetails := range policiesDetails {
			// Inline policies are not version-checked, only attached ones.
			if policyDetails.PolicType == aws.Inline {
				continue
			}
			isCompatible, err := awsClient.IsPolicyCompatible(policyDetails.PolicyArn, clusterVersion)
			if err != nil {
				return err
			}
			if !isCompatible {
				return errors.Errorf("Operator role '%s' is not compatible with cluster version '%s'", roleARN, clusterVersion)
			}
		}
		if reporter.IsTerminal() && !output.HasFlag() {
			reporter.Infof("Using '%s'", *roleObject.Arn)
		}
	}
	return nil
}
// validateIssuerUrlMatchesAssumePolicyDocument checks that the role's trust
// (assume-role) policy document references the OIDC issuer host+path.
func validateIssuerUrlMatchesAssumePolicyDocument(
	roleArn string, parsedUrl *url.URL, assumePolicyDocument string) error {
	// Host plus path identifies the issuer; Path may be empty.
	issuerUrl := parsedUrl.Host + parsedUrl.Path
	decodedAssumePolicyDocument, err := url.QueryUnescape(assumePolicyDocument)
	if err != nil {
		return err
	}
	if strings.Contains(decodedAssumePolicyDocument, issuerUrl) {
		return nil
	}
	return errors.Errorf("Operator role '%s' does not have trusted relationship to '%s' issuer URL",
		roleArn, issuerUrl)
}
| CheckRoleExists | identifier_name |
reconciler.go | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullsubscription
import (
"context"
"encoding/json"
"fmt"
"time"
"k8s.io/client-go/tools/cache"
"go.uber.org/zap"
"github.com/google/knative-gcp/pkg/utils"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
appsv1listers "k8s.io/client-go/listers/apps/v1"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/reconciler"
tracingconfig "knative.dev/pkg/tracing/config"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
"knative.dev/pkg/resolver"
"github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
listers "github.com/google/knative-gcp/pkg/client/listers/pubsub/v1alpha1"
gpubsub "github.com/google/knative-gcp/pkg/gclient/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub/pullsubscription/resources"
"github.com/google/knative-gcp/pkg/tracing"
)
const (
// Component names for metrics.
sourceComponent = "source"
channelComponent = "channel"
)
// Base implements the core controller logic for pullsubscription.
type Base struct {
	*pubsub.PubSubBase
	// DeploymentLister index properties about deployments.
	DeploymentLister appsv1listers.DeploymentLister
	// PullSubscriptionLister index properties about pullsubscriptions.
	PullSubscriptionLister listers.PullSubscriptionLister
	// UriResolver resolves sink/transformer Destinations into URIs.
	UriResolver *resolver.URIResolver
	// ReceiveAdapterImage is the container image for the data-plane Deployment.
	ReceiveAdapterImage string
	// ControllerAgentName identifies this controller (used in labels).
	ControllerAgentName string
	// FinalizerName is the finalizer managed on PullSubscriptions.
	FinalizerName string
	// LoggingConfig is serialized and handed to the receive adapter.
	LoggingConfig *logging.Config
	// MetricsConfig is serialized and handed to the receive adapter.
	MetricsConfig *metrics.ExporterOptions
	// TracingConfig is serialized and handed to the receive adapter.
	TracingConfig *tracingconfig.Config
	// CreateClientFn is the function used to create the Pub/Sub client that interacts with Pub/Sub.
	// This is needed so that we can inject a mock client for UTs purposes.
	CreateClientFn gpubsub.CreateFn
	// ReconcileDataPlaneFn is the function used to reconcile the data plane resources.
	ReconcileDataPlaneFn ReconcileDataPlaneFunc
}
// ReconcileDataPlaneFunc is used to reconcile the data plane component(s).
type ReconcileDataPlaneFunc func(ctx context.Context, d *appsv1.Deployment, ps *v1alpha1.PullSubscription) error
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the PullSubscription resource
// with the current status of the resource.
func (r *Base) Reconcile(ctx context.Context, key string) error {
	// Convert the namespace/name string into a distinct namespace and name
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Invalid resource key")
		return nil
	}
	// Get the PullSubscription resource with this namespace/name
	original, err := r.PullSubscriptionLister.PullSubscriptions(namespace).Get(name)
	if apierrs.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing.
		logging.FromContext(ctx).Desugar().Error("PullSubscription in work queue no longer exists")
		return nil
	} else if err != nil {
		return err
	}
	// Don't modify the informers copy
	ps := original.DeepCopy()
	// Reconcile this copy of the PullSubscription and then write back any status
	// updates regardless of whether the reconciliation errored out.
	var reconcileErr = r.reconcile(ctx, ps)
	// If no error is returned, mark the observed generation.
	// This has to be done before updateStatus is called.
	if reconcileErr == nil {
		ps.Status.ObservedGeneration = ps.Generation
	}
	if equality.Semantic.DeepEqual(original.Finalizers, ps.Finalizers) {
		// If we didn't change finalizers then don't call updateFinalizers.
	} else if _, updated, fErr := r.updateFinalizers(ctx, ps); fErr != nil {
		logging.FromContext(ctx).Desugar().Warn("Failed to update PullSubscription finalizers", zap.Error(fErr))
		r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
			"Failed to update finalizers for PullSubscription %q: %v", ps.Name, fErr)
		return fErr
	} else if updated {
		// There was a difference and updateFinalizers said it updated and did not return an error.
		r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q finalizers", ps.Name)
	}
	if equality.Semantic.DeepEqual(original.Status, ps.Status) {
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the informer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	} else if uErr := r.updateStatus(ctx, original, ps); uErr != nil {
		logging.FromContext(ctx).Desugar().Warn("Failed to update ps status", zap.Error(uErr))
		r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
			"Failed to update status for PullSubscription %q: %v", ps.Name, uErr)
		return uErr
	} else if reconcileErr == nil {
		// There was a difference and updateStatus did not return an error.
		r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q", ps.Name)
	}
	// Surface the reconcile error as a warning event; it is still returned
	// so the workqueue retries the key.
	if reconcileErr != nil {
		r.Recorder.Event(ps, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
	}
	return reconcileErr
}
// reconcile converges the Pub/Sub subscription and data-plane resources with
// the PullSubscription spec, and handles deletion when a DeletionTimestamp
// is set. Status conditions are updated along the way.
func (r *Base) reconcile(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("pullsubscription", ps)))
	ps.Status.InitializeConditions()
	// Deletion path: tear down the subscription and drop our finalizer.
	if ps.DeletionTimestamp != nil {
		logging.FromContext(ctx).Desugar().Debug("Deleting Pub/Sub subscription")
		if err := r.deleteSubscription(ctx, ps); err != nil {
			ps.Status.MarkNoSubscription("SubscriptionDeleteFailed", "Failed to delete Pub/Sub subscription: %s", err.Error())
			return err
		}
		ps.Status.MarkNoSubscription("SubscriptionDeleted", "Successfully deleted Pub/Sub subscription %q", ps.Status.SubscriptionID)
		ps.Status.SubscriptionID = ""
		r.removeFinalizer(ps)
		return nil
	}
	// Sink is required.
	sinkURI, err := r.resolveDestination(ctx, ps.Spec.Sink, ps)
	if err != nil {
		ps.Status.MarkNoSink("InvalidSink", err.Error())
		return err
	} else {
		ps.Status.MarkSink(sinkURI)
	}
	// Transformer is optional.
	if ps.Spec.Transformer != nil {
		transformerURI, err := r.resolveDestination(ctx, *ps.Spec.Transformer, ps)
		if err != nil {
			ps.Status.MarkNoTransformer("InvalidTransformer", err.Error())
		} else {
			ps.Status.MarkTransformer(transformerURI)
		}
	} else {
		// If the transformer is nil, mark is as nil and clean up the URI.
		ps.Status.MarkNoTransformer("TransformerNil", "Transformer is nil")
		ps.Status.TransformerURI = ""
	}
	// Add the finalizer before creating external resources so we always get
	// a chance to clean them up on deletion.
	r.addFinalizer(ps)
	subscriptionID, err := r.reconcileSubscription(ctx, ps)
	if err != nil {
		ps.Status.MarkNoSubscription("SubscriptionReconcileFailed", "Failed to reconcile Pub/Sub subscription: %s", err.Error())
		return err
	}
	ps.Status.MarkSubscribed(subscriptionID)
	// Bring up the data plane (receive adapter) for this subscription.
	err = r.reconcileDataPlaneResources(ctx, ps, r.ReconcileDataPlaneFn)
	if err != nil {
		ps.Status.MarkNotDeployed("DataPlaneReconcileFailed", "Failed to reconcile Data Plane resource(s): %s", err.Error())
		return err
	}
	ps.Status.MarkDeployed()
	return nil
}
// reconcileSubscription ensures a Pub/Sub subscription exists for the
// PullSubscription and returns its ID. The referenced topic must already
// exist; an existing subscription's config is currently NOT updated (TODO
// below).
func (r *Base) reconcileSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) (string, error) {
	// Resolve and cache the project ID in status on first reconcile.
	if ps.Status.ProjectID == "" {
		projectID, err := utils.ProjectID(ps.Spec.Project)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
			return "", err
		}
		// Set the projectID in the status.
		ps.Status.ProjectID = projectID
	}
	// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
	// pointing at a credential file.
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return "", err
	}
	defer client.Close()
	// Generate the subscription name
	subID := resources.GenerateSubscriptionName(ps)
	// Load the subscription.
	sub := client.Subscription(subID)
	subExists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return "", err
	}
	t := client.Topic(ps.Spec.Topic)
	topicExists, err := t.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
		return "", err
	}
	if !topicExists {
		return "", fmt.Errorf("Topic %q does not exist", ps.Spec.Topic)
	}
	// subConfig is the wanted config based on settings.
	subConfig := gpubsub.SubscriptionConfig{
		Topic:               t,
		RetainAckedMessages: ps.Spec.RetainAckedMessages,
	}
	// Optional spec fields are validated as Go durations before use.
	if ps.Spec.AckDeadline != nil {
		ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
			return "", fmt.Errorf("invalid ackDeadline: %w", err)
		}
		subConfig.AckDeadline = ackDeadline
	}
	if ps.Spec.RetentionDuration != nil {
		retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
			return "", fmt.Errorf("invalid retentionDuration: %w", err)
		}
		subConfig.RetentionDuration = retentionDuration
	}
	// If the subscription doesn't exist, create it.
	if !subExists {
		// Create a new subscription to the previous topic with the given name.
		sub, err = client.CreateSubscription(ctx, subID, subConfig)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
			return "", err
		}
	}
	// TODO update the subscription's config if needed.
	return subID, nil
}
// deleteSubscription looks at the status.SubscriptionID and if non-empty,
// hence indicating that we have created a subscription successfully
// in the PullSubscription, remove it.
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	subscriptionID := ps.Status.SubscriptionID
	if subscriptionID == "" {
		// Nothing was ever created; nothing to clean up.
		return nil
	}
	// At this point the project ID should have been populated in the status.
	// Querying Pub/Sub as the subscription could have been deleted outside the cluster (e.g, through gcloud).
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return err
	}
	defer client.Close()
	sub := client.Subscription(subscriptionID)
	exists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return err
	}
	if !exists {
		// Already gone (possibly deleted out-of-band); treat as success.
		return nil
	}
	if err := sub.Delete(ctx); err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
		return err
	}
	return nil
}
// updateStatus writes desired.Status back to the API server, retrying on
// conflicts (refetching the latest object after the first failed attempt).
// On the transition to Ready it emits an event and reports a readiness metric.
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
	existing := original.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			existing, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}
		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}
		// Detect the not-ready -> ready transition before overwriting status.
		becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()
		existing.Status = desired.Status
		_, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).UpdateStatus(existing)
		if err == nil && becomesReady {
			// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
			duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
			logging.FromContext(ctx).Desugar().Info("PullSubscription became ready", zap.Any("after", duration))
			r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PullSubscription %q became ready", existing.Name))
			if metricErr := r.StatsReporter.ReportReady("PullSubscription", existing.Namespace, existing.Name, duration); metricErr != nil {
				logging.FromContext(ctx).Desugar().Error("Failed to record ready for PullSubscription", zap.Error(metricErr))
			}
		}
		return err
	})
}
// updateFinalizers is a generic method for future compatibility with a
// reconciler SDK.
// It diffs desired.Finalizers against the lister's copy and patches only
// r.FinalizerName in or out. Returns the patched object and whether a patch
// was issued.
func (r *Base) updateFinalizers(ctx context.Context, desired *v1alpha1.PullSubscription) (*v1alpha1.PullSubscription, bool, error) {
	source, err := r.PullSubscriptionLister.PullSubscriptions(desired.Namespace).Get(desired.Name)
	if err != nil {
		return nil, false, err
	}
	// Don't modify the informers copy.
	existing := source.DeepCopy()
	var finalizers []string
	// If there's nothing to update, just return.
	existingFinalizers := sets.NewString(existing.Finalizers...)
	desiredFinalizers := sets.NewString(desired.Finalizers...)
	if desiredFinalizers.Has(r.FinalizerName) {
		if existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.FinalizerName)
	} else {
		if !existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.FinalizerName)
		finalizers = existingFinalizers.List()
	}
	// Include resourceVersion in the merge patch so the server rejects the
	// patch if the object changed underneath us.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}
	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return desired, false, err
	}
	update, err := r.RunClientSet.PubsubV1alpha1().PullSubscriptions(existing.Namespace).Patch(existing.Name, types.MergePatchType, patch)
	return update, true, err
}
// addFinalizer ensures r.FinalizerName is present on the PullSubscription.
func (r *Base) addFinalizer(s *v1alpha1.PullSubscription) {
	updated := sets.NewString(s.Finalizers...).Insert(r.FinalizerName)
	s.Finalizers = updated.List()
}
// removeFinalizer drops r.FinalizerName from the PullSubscription, if present.
func (r *Base) removeFinalizer(s *v1alpha1.PullSubscription) {
	updated := sets.NewString(s.Finalizers...).Delete(r.FinalizerName)
	s.Finalizers = updated.List()
}
func (r *Base) reconcileDataPlaneResources(ctx context.Context, src *v1alpha1.PullSubscription, f ReconcileDataPlaneFunc) error |
// GetOrCreateReceiveAdapter returns the receive adapter Deployment owned by
// the given PullSubscription, creating it from `desired` when none exists.
func (r *Base) GetOrCreateReceiveAdapter(ctx context.Context, desired *appsv1.Deployment, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
	existing, err := r.getReceiveAdapter(ctx, src)
	if err != nil && !apierrors.IsNotFound(err) {
		logging.FromContext(ctx).Desugar().Error("Unable to get an existing Receive Adapter", zap.Error(err))
		return nil, err
	}
	if existing == nil {
		// Not found: create the desired Deployment.
		existing, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Create(desired)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Error creating Receive Adapter", zap.Error(err))
			return nil, err
		}
	}
	return existing, nil
}
// getReceiveAdapter lists Deployments matching this controller's label
// selector and returns the one owner-referenced by src, or a NotFound error
// when no controlled Deployment exists.
func (r *Base) getReceiveAdapter(ctx context.Context, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
	dl, err := r.KubeClientSet.AppsV1().Deployments(src.Namespace).List(metav1.ListOptions{
		LabelSelector: resources.GetLabelSelector(r.ControllerAgentName, src.Name).String(),
		TypeMeta: metav1.TypeMeta{
			APIVersion: appsv1.SchemeGroupVersion.String(),
			Kind:       "Deployment",
		},
	})
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Unable to list deployments", zap.Error(err))
		return nil, err
	}
	// The label selector can match stale objects; require the owner reference.
	for _, dep := range dl.Items {
		if metav1.IsControlledBy(&dep, src) {
			return &dep, nil
		}
	}
	return nil, apierrors.NewNotFound(schema.GroupResource{}, "")
}
// UpdateFromLoggingConfigMap swaps in a new logging configuration built from
// the given ConfigMap, stripping the documentation-only "_example" key first.
// On a parse error the previous config is kept and a warning is logged.
func (r *Base) UpdateFromLoggingConfigMap(cfg *corev1.ConfigMap) {
	// Capture the name up-front: the original dereferenced cfg.Name on the
	// error path even when cfg was nil, which would panic.
	name := ""
	if cfg != nil {
		delete(cfg.Data, "_example")
		name = cfg.Name
	}
	logcfg, err := logging.NewConfigFromConfigMap(cfg)
	if err != nil {
		r.Logger.Warnw("Failed to create logging config from configmap", zap.String("cfg.Name", name))
		return
	}
	r.LoggingConfig = logcfg
	r.Logger.Debugw("Update from logging ConfigMap", zap.Any("loggingCfg", cfg))
	// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
// UpdateFromMetricsConfigMap rebuilds the metrics exporter options from the
// given ConfigMap, stripping the documentation-only "_example" key first.
func (r *Base) UpdateFromMetricsConfigMap(cfg *corev1.ConfigMap) {
	// Guard the data access: the original dereferenced cfg.Data
	// unconditionally and would panic on a nil ConfigMap even though nil was
	// checked just above for the delete.
	var data map[string]string
	if cfg != nil {
		delete(cfg.Data, "_example")
		data = cfg.Data
	}
	// Cannot set the component here as we don't know if its a source or a channel.
	// Will set that up dynamically before creating the receive adapter.
	// Won't be able to requeue the PullSubscriptions.
	r.MetricsConfig = &metrics.ExporterOptions{
		Domain:    metrics.Domain(),
		ConfigMap: data,
	}
	r.Logger.Debugw("Update from metrics ConfigMap", zap.Any("metricsCfg", cfg))
}
// UpdateFromTracingConfigMap swaps in a new tracing configuration built from
// the given ConfigMap. A nil ConfigMap is rejected; on a parse error the
// previous config is kept and a warning is logged.
func (r *Base) UpdateFromTracingConfigMap(cfg *corev1.ConfigMap) {
	if cfg == nil {
		r.Logger.Error("Tracing ConfigMap is nil")
		return
	}
	// Drop the documentation-only example entry before parsing.
	delete(cfg.Data, "_example")
	tracingCfg, err := tracingconfig.NewTracingConfigFromConfigMap(cfg)
	if err != nil {
		r.Logger.Warnw("Failed to create tracing config from configmap", zap.String("cfg.Name", cfg.Name))
		return
	}
	r.TracingConfig = tracingCfg
	r.Logger.Debugw("Updated Tracing config", zap.Any("tracingCfg", r.TracingConfig))
	// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
// resolveDestination turns a duckv1.Destination into a URI string, defaulting
// a Ref's namespace to the PullSubscription's namespace before resolution.
func (r *Base) resolveDestination(ctx context.Context, destination duckv1.Destination, ps *v1alpha1.PullSubscription) (string, error) {
	// Ref lookups are namespace-scoped; default to the PullSubscription's namespace.
	if ref := destination.Ref; ref != nil {
		ref.Namespace = ps.Namespace
	}
	resolved, err := r.UriResolver.URIFromDestinationV1(destination, ps)
	if err != nil {
		return "", err
	}
	return resolved.String(), nil
}
| {
loggingConfig, err := logging.LoggingConfigToJson(r.LoggingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing existing logging config", zap.Error(err))
}
if r.MetricsConfig != nil {
component := sourceComponent
// Set the metric component based on the channel label.
if _, ok := src.Labels["events.cloud.google.com/channel"]; ok {
component = channelComponent
}
r.MetricsConfig.Component = component
}
metricsConfig, err := metrics.MetricsOptionsToJson(r.MetricsConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing metrics config", zap.Error(err))
}
tracingConfig, err := tracing.ConfigToJSON(r.TracingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing tracing config", zap.Error(err))
}
desired := resources.MakeReceiveAdapter(ctx, &resources.ReceiveAdapterArgs{
Image: r.ReceiveAdapterImage,
Source: src,
Labels: resources.GetLabels(r.ControllerAgentName, src.Name),
SubscriptionID: src.Status.SubscriptionID,
SinkURI: src.Status.SinkURI,
TransformerURI: src.Status.TransformerURI,
LoggingConfig: loggingConfig,
MetricsConfig: metricsConfig,
TracingConfig: tracingConfig,
})
return f(ctx, desired, src)
} | identifier_body |
reconciler.go | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullsubscription
import (
"context"
"encoding/json"
"fmt"
"time"
"k8s.io/client-go/tools/cache"
"go.uber.org/zap"
"github.com/google/knative-gcp/pkg/utils"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
appsv1listers "k8s.io/client-go/listers/apps/v1"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/reconciler"
tracingconfig "knative.dev/pkg/tracing/config"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
"knative.dev/pkg/resolver"
"github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
listers "github.com/google/knative-gcp/pkg/client/listers/pubsub/v1alpha1"
gpubsub "github.com/google/knative-gcp/pkg/gclient/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub/pullsubscription/resources"
"github.com/google/knative-gcp/pkg/tracing"
)
const (
// Component names for metrics.
sourceComponent = "source"
channelComponent = "channel"
)
// Base implements the core controller logic for pullsubscription.
type Base struct {
	*pubsub.PubSubBase
	// DeploymentLister index properties about deployments.
	DeploymentLister appsv1listers.DeploymentLister
	// PullSubscriptionLister index properties about pullsubscriptions.
	PullSubscriptionLister listers.PullSubscriptionLister
	// UriResolver resolves sink/transformer Destinations into URIs.
	UriResolver *resolver.URIResolver
	// ReceiveAdapterImage is the container image for the data-plane Deployment.
	ReceiveAdapterImage string
	// ControllerAgentName identifies this controller (used in labels).
	ControllerAgentName string
	// FinalizerName is the finalizer managed on PullSubscriptions.
	FinalizerName string
	// LoggingConfig is serialized and handed to the receive adapter.
	LoggingConfig *logging.Config
	// MetricsConfig is serialized and handed to the receive adapter.
	MetricsConfig *metrics.ExporterOptions
	// TracingConfig is serialized and handed to the receive adapter.
	TracingConfig *tracingconfig.Config
	// CreateClientFn is the function used to create the Pub/Sub client that interacts with Pub/Sub.
	// This is needed so that we can inject a mock client for UTs purposes.
	CreateClientFn gpubsub.CreateFn
	// ReconcileDataPlaneFn is the function used to reconcile the data plane resources.
	ReconcileDataPlaneFn ReconcileDataPlaneFunc
}
// ReconcileDataPlaneFunc is used to reconcile the data plane component(s).
// Implementations receive the desired Deployment and the owning PullSubscription.
type ReconcileDataPlaneFunc func(ctx context.Context, d *appsv1.Deployment, ps *v1alpha1.PullSubscription) error
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the PullSubscription resource
// with the current status of the resource.
func (r *Base) Reconcile(ctx context.Context, key string) error {
	// Convert the namespace/name string into a distinct namespace and name
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// A malformed key can never succeed; drop it rather than requeue.
		logging.FromContext(ctx).Desugar().Error("Invalid resource key")
		return nil
	}
	// Get the PullSubscription resource with this namespace/name
	original, err := r.PullSubscriptionLister.PullSubscriptions(namespace).Get(name)
	if apierrs.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing.
		logging.FromContext(ctx).Desugar().Error("PullSubscription in work queue no longer exists")
		return nil
	} else if err != nil {
		return err
	}
	// Don't modify the informers copy
	ps := original.DeepCopy()
	// Reconcile this copy of the PullSubscription and then write back any status
	// updates regardless of whether the reconciliation errored out.
	var reconcileErr = r.reconcile(ctx, ps)
	// If no error is returned, mark the observed generation.
	// This has to be done before updateStatus is called.
	if reconcileErr == nil {
		ps.Status.ObservedGeneration = ps.Generation
	}
	if equality.Semantic.DeepEqual(original.Finalizers, ps.Finalizers) {
		// If we didn't change finalizers then don't call updateFinalizers.
	} else if _, updated, fErr := r.updateFinalizers(ctx, ps); fErr != nil {
		logging.FromContext(ctx).Desugar().Warn("Failed to update PullSubscription finalizers", zap.Error(fErr))
		r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
			"Failed to update finalizers for PullSubscription %q: %v", ps.Name, fErr)
		return fErr
	} else if updated {
		// There was a difference and updateFinalizers said it updated and did not return an error.
		r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q finalizers", ps.Name)
	}
	if equality.Semantic.DeepEqual(original.Status, ps.Status) {
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the informer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	} else if uErr := r.updateStatus(ctx, original, ps); uErr != nil {
		logging.FromContext(ctx).Desugar().Warn("Failed to update ps status", zap.Error(uErr))
		r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
			"Failed to update status for PullSubscription %q: %v", ps.Name, uErr)
		return uErr
	} else if reconcileErr == nil {
		// There was a difference and updateStatus did not return an error.
		r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q", ps.Name)
	}
	// Surface any reconcile error as a warning event so it is visible on the resource,
	// and return it so the key is requeued.
	if reconcileErr != nil {
		r.Recorder.Event(ps, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
	}
	return reconcileErr
}
// reconcile drives the PullSubscription toward its desired state: it handles
// deletion (tearing down the Pub/Sub subscription and dropping the finalizer),
// resolves sink and optional transformer destinations, ensures the Pub/Sub
// subscription exists, and reconciles the data-plane Deployment. Status
// conditions are updated along the way; the caller persists them.
func (r *Base) reconcile(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("pullsubscription", ps)))
	ps.Status.InitializeConditions()
	// Deletion path: remove the Pub/Sub subscription, then release the finalizer.
	if ps.DeletionTimestamp != nil {
		logging.FromContext(ctx).Desugar().Debug("Deleting Pub/Sub subscription")
		if err := r.deleteSubscription(ctx, ps); err != nil {
			ps.Status.MarkNoSubscription("SubscriptionDeleteFailed", "Failed to delete Pub/Sub subscription: %s", err.Error())
			return err
		}
		ps.Status.MarkNoSubscription("SubscriptionDeleted", "Successfully deleted Pub/Sub subscription %q", ps.Status.SubscriptionID)
		ps.Status.SubscriptionID = ""
		r.removeFinalizer(ps)
		return nil
	}
	// Sink is required; failing to resolve it aborts the reconcile.
	sinkURI, err := r.resolveDestination(ctx, ps.Spec.Sink, ps)
	if err != nil {
		ps.Status.MarkNoSink("InvalidSink", err.Error())
		return err
	}
	ps.Status.MarkSink(sinkURI)
	// Transformer is optional; a resolution failure only marks the condition.
	if ps.Spec.Transformer == nil {
		// If the transformer is nil, mark it as nil and clean up the URI.
		ps.Status.MarkNoTransformer("TransformerNil", "Transformer is nil")
		ps.Status.TransformerURI = ""
	} else if transformerURI, terr := r.resolveDestination(ctx, *ps.Spec.Transformer, ps); terr != nil {
		ps.Status.MarkNoTransformer("InvalidTransformer", terr.Error())
	} else {
		ps.Status.MarkTransformer(transformerURI)
	}
	// Guard the external resource with a finalizer before creating it.
	r.addFinalizer(ps)
	subscriptionID, err := r.reconcileSubscription(ctx, ps)
	if err != nil {
		ps.Status.MarkNoSubscription("SubscriptionReconcileFailed", "Failed to reconcile Pub/Sub subscription: %s", err.Error())
		return err
	}
	ps.Status.MarkSubscribed(subscriptionID)
	if err := r.reconcileDataPlaneResources(ctx, ps, r.ReconcileDataPlaneFn); err != nil {
		ps.Status.MarkNotDeployed("DataPlaneReconcileFailed", "Failed to reconcile Data Plane resource(s): %s", err.Error())
		return err
	}
	ps.Status.MarkDeployed()
	return nil
}
// reconcileSubscription ensures the Pub/Sub subscription for this
// PullSubscription exists, creating it if needed, and returns its ID.
// It populates status.ProjectID on first use, validates that the target
// topic exists, and applies spec ackDeadline/retentionDuration settings
// when creating the subscription. It does NOT yet update the config of an
// already-existing subscription (see TODO below).
func (r *Base) reconcileSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) (string, error) {
	if ps.Status.ProjectID == "" {
		projectID, err := utils.ProjectID(ps.Spec.Project)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
			return "", err
		}
		// Set the projectID in the status.
		ps.Status.ProjectID = projectID
	}
	// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
	// pointing at a credential file.
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return "", err
	}
	defer client.Close()
	// Generate the subscription name
	subID := resources.GenerateSubscriptionName(ps)
	// Load the subscription.
	sub := client.Subscription(subID)
	subExists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return "", err
	}
	t := client.Topic(ps.Spec.Topic)
	topicExists, err := t.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
		return "", err
	}
	if !topicExists {
		// Error strings are not capitalized per Go convention (staticcheck ST1005).
		return "", fmt.Errorf("topic %q does not exist", ps.Spec.Topic)
	}
	// subConfig is the wanted config based on settings.
	subConfig := gpubsub.SubscriptionConfig{
		Topic:               t,
		RetainAckedMessages: ps.Spec.RetainAckedMessages,
	}
	if ps.Spec.AckDeadline != nil {
		ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
			return "", fmt.Errorf("invalid ackDeadline: %w", err)
		}
		subConfig.AckDeadline = ackDeadline
	}
	if ps.Spec.RetentionDuration != nil {
		retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
			return "", fmt.Errorf("invalid retentionDuration: %w", err)
		}
		subConfig.RetentionDuration = retentionDuration
	}
	// If the subscription doesn't exist, create it.
	if !subExists {
		// Create a new subscription to the previous topic with the given name.
		sub, err = client.CreateSubscription(ctx, subID, subConfig)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
			return "", err
		}
	}
	// TODO update the subscription's config if needed.
	return subID, nil
}
// deleteSubscription looks at the status.SubscriptionID and if non-empty,
// hence indicating that we have created a subscription successfully
// in the PullSubscription, remove it.
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	// Nothing was ever recorded as created; nothing to tear down.
	if ps.Status.SubscriptionID == "" {
		return nil
	}
	// At this point the project ID should have been populated in the status.
	// Query Pub/Sub, as the subscription could have been deleted outside the
	// cluster (e.g., through gcloud).
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return err
	}
	defer client.Close()
	sub := client.Subscription(ps.Status.SubscriptionID)
	exists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return err
	}
	if !exists {
		// Already gone; treat as success.
		return nil
	}
	if err := sub.Delete(ctx); err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
		return err
	}
	return nil
}
// updateStatus pushes desired.Status to the API server, retrying on optimistic
// concurrency conflicts. The closure captures `existing` so retries after the
// first attempt re-fetch the latest object instead of reusing the stale
// informer copy. On a successful transition to Ready it emits an event and a
// readiness metric.
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
	existing := original.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			existing, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}
		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}
		// Capture the readiness transition BEFORE overwriting existing.Status.
		becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()
		existing.Status = desired.Status
		_, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).UpdateStatus(existing)
		if err == nil && becomesReady {
			// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
			duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
			logging.FromContext(ctx).Desugar().Info("PullSubscription became ready", zap.Any("after", duration))
			r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PullSubscription %q became ready", existing.Name))
			if metricErr := r.StatsReporter.ReportReady("PullSubscription", existing.Namespace, existing.Name, duration); metricErr != nil {
				logging.FromContext(ctx).Desugar().Error("Failed to record ready for PullSubscription", zap.Error(metricErr))
			}
		}
		return err
	})
}
// updateFinalizers is a generic method for future compatibility with a
// reconciler SDK.
// updateFinalizers patches the PullSubscription so that its finalizer list
// matches `desired` with respect to r.FinalizerName only. Returns the updated
// object, whether a patch was issued, and any error. The merge patch pins
// resourceVersion so a concurrent modification fails the patch rather than
// being silently overwritten.
func (r *Base) updateFinalizers(ctx context.Context, desired *v1alpha1.PullSubscription) (*v1alpha1.PullSubscription, bool, error) {
	source, err := r.PullSubscriptionLister.PullSubscriptions(desired.Namespace).Get(desired.Name)
	if err != nil {
		return nil, false, err
	}
	// Don't modify the informers copy.
	existing := source.DeepCopy()
	var finalizers []string
	// If there's nothing to update, just return.
	existingFinalizers := sets.NewString(existing.Finalizers...)
	desiredFinalizers := sets.NewString(desired.Finalizers...)
	if desiredFinalizers.Has(r.FinalizerName) {
		if existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.FinalizerName)
	} else {
		if !existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.FinalizerName)
		finalizers = existingFinalizers.List()
	}
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers": finalizers,
			// Including resourceVersion makes the patch conditional on the
			// object not having changed since we read it.
			"resourceVersion": existing.ResourceVersion,
		},
	}
	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return desired, false, err
	}
	update, err := r.RunClientSet.PubsubV1alpha1().PullSubscriptions(existing.Namespace).Patch(existing.Name, types.MergePatchType, patch)
	return update, true, err
}
// addFinalizer ensures r.FinalizerName appears exactly once in the
// PullSubscription's finalizer list (sets.List returns a sorted slice).
func (r *Base) addFinalizer(s *v1alpha1.PullSubscription) {
	s.Finalizers = sets.NewString(s.Finalizers...).Insert(r.FinalizerName).List()
}
// removeFinalizer strips r.FinalizerName from the PullSubscription's
// finalizer list (sets.List returns a sorted slice).
func (r *Base) removeFinalizer(s *v1alpha1.PullSubscription) {
	s.Finalizers = sets.NewString(s.Finalizers...).Delete(r.FinalizerName).List()
}
// reconcileDataPlaneResources builds the desired receive-adapter Deployment
// (serializing the current logging/metrics/tracing configs into it) and hands
// it to the supplied ReconcileDataPlaneFunc. Serialization failures are logged
// but not fatal: the adapter is still created, with an empty config string.
func (r *Base) reconcileDataPlaneResources(ctx context.Context, src *v1alpha1.PullSubscription, f ReconcileDataPlaneFunc) error {
	loggingConfig, err := logging.LoggingConfigToJson(r.LoggingConfig)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Error serializing existing logging config", zap.Error(err))
	}
	if r.MetricsConfig != nil {
		component := sourceComponent
		// Set the metric component based on the channel label.
		if _, ok := src.Labels["events.cloud.google.com/channel"]; ok {
			component = channelComponent
		}
		r.MetricsConfig.Component = component
	}
	metricsConfig, err := metrics.MetricsOptionsToJson(r.MetricsConfig)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Error serializing metrics config", zap.Error(err))
	}
	tracingConfig, err := tracing.ConfigToJSON(r.TracingConfig)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Error serializing tracing config", zap.Error(err))
	}
	desired := resources.MakeReceiveAdapter(ctx, &resources.ReceiveAdapterArgs{
		Image:          r.ReceiveAdapterImage,
		Source:         src,
		Labels:         resources.GetLabels(r.ControllerAgentName, src.Name),
		SubscriptionID: src.Status.SubscriptionID,
		SinkURI:        src.Status.SinkURI,
		TransformerURI: src.Status.TransformerURI,
		LoggingConfig:  loggingConfig,
		MetricsConfig:  metricsConfig,
		TracingConfig:  tracingConfig,
	})
	return f(ctx, desired, src)
}
func (r *Base) | (ctx context.Context, desired *appsv1.Deployment, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
existing, err := r.getReceiveAdapter(ctx, src)
if err != nil && !apierrors.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Unable to get an existing Receive Adapter", zap.Error(err))
return nil, err
}
if existing == nil {
existing, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Create(desired)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error creating Receive Adapter", zap.Error(err))
return nil, err
}
}
return existing, nil
}
// getReceiveAdapter lists Deployments in src's namespace matching this
// controller's label selector and returns the first one controlled by src.
// When none is found it returns a NotFound API error (with an empty
// GroupResource) so callers can distinguish "absent" from real failures.
func (r *Base) getReceiveAdapter(ctx context.Context, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
	dl, err := r.KubeClientSet.AppsV1().Deployments(src.Namespace).List(metav1.ListOptions{
		LabelSelector: resources.GetLabelSelector(r.ControllerAgentName, src.Name).String(),
		TypeMeta: metav1.TypeMeta{
			APIVersion: appsv1.SchemeGroupVersion.String(),
			Kind:       "Deployment",
		},
	})
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Unable to list deployments", zap.Error(err))
		return nil, err
	}
	for _, dep := range dl.Items {
		// Ownership check: only return the Deployment this PullSubscription controls.
		if metav1.IsControlledBy(&dep, src) {
			return &dep, nil
		}
	}
	return nil, apierrors.NewNotFound(schema.GroupResource{}, "")
}
// UpdateFromLoggingConfigMap refreshes r.LoggingConfig from the given
// ConfigMap. The "_example" key is dropped before parsing. A parse failure
// leaves the previous config in place.
func (r *Base) UpdateFromLoggingConfigMap(cfg *corev1.ConfigMap) {
	if cfg != nil {
		delete(cfg.Data, "_example")
	}
	logcfg, err := logging.NewConfigFromConfigMap(cfg)
	if err != nil {
		// cfg may be nil on this path (the guard above shows nil is possible);
		// the original code dereferenced cfg.Name here, which would panic.
		name := ""
		if cfg != nil {
			name = cfg.Name
		}
		r.Logger.Warnw("Failed to create logging config from configmap", zap.String("cfg.Name", name), zap.Error(err))
		return
	}
	r.LoggingConfig = logcfg
	r.Logger.Debugw("Update from logging ConfigMap", zap.Any("loggingCfg", cfg))
	// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
// UpdateFromMetricsConfigMap refreshes r.MetricsConfig from the given
// ConfigMap. The "_example" key is dropped before use.
func (r *Base) UpdateFromMetricsConfigMap(cfg *corev1.ConfigMap) {
	// The original code guarded the delete with `cfg != nil` but then read
	// cfg.Data unconditionally, panicking on a nil ConfigMap. Collect the data
	// under the same guard instead.
	var data map[string]string
	if cfg != nil {
		delete(cfg.Data, "_example")
		data = cfg.Data
	}
	// Cannot set the component here as we don't know if its a source or a channel.
	// Will set that up dynamically before creating the receive adapter.
	// Won't be able to requeue the PullSubscriptions.
	r.MetricsConfig = &metrics.ExporterOptions{
		Domain:    metrics.Domain(),
		ConfigMap: data,
	}
	r.Logger.Debugw("Update from metrics ConfigMap", zap.Any("metricsCfg", cfg))
}
// UpdateFromTracingConfigMap refreshes r.TracingConfig from the given
// ConfigMap. A nil ConfigMap is rejected and logged; a parse failure leaves
// the previous config in place.
func (r *Base) UpdateFromTracingConfigMap(cfg *corev1.ConfigMap) {
	if cfg == nil {
		r.Logger.Error("Tracing ConfigMap is nil")
		return
	}
	// Drop the commented example entry before parsing.
	delete(cfg.Data, "_example")
	tracingCfg, err := tracingconfig.NewTracingConfigFromConfigMap(cfg)
	if err != nil {
		r.Logger.Warnw("Failed to create tracing config from configmap", zap.String("cfg.Name", cfg.Name))
		return
	}
	r.TracingConfig = tracingCfg
	r.Logger.Debugw("Updated Tracing config", zap.Any("tracingCfg", r.TracingConfig))
	// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
// resolveDestination turns a duckv1.Destination into a concrete URI string.
// Ref-based destinations are pinned to the PullSubscription's namespace
// before resolution.
func (r *Base) resolveDestination(ctx context.Context, destination duckv1.Destination, ps *v1alpha1.PullSubscription) (string, error) {
	if ref := destination.Ref; ref != nil {
		ref.Namespace = ps.Namespace
	}
	url, err := r.UriResolver.URIFromDestinationV1(destination, ps)
	if err != nil {
		return "", err
	}
	return url.String(), nil
}
// ---- dataset artifact: FIM answer for the masked method above is
// ---- `GetOrCreateReceiveAdapter` (fim_type: identifier_name).
// ---- next dataset row: reconciler.go (duplicate copy of this file) ----
/*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullsubscription
import (
"context"
"encoding/json"
"fmt"
"time"
"k8s.io/client-go/tools/cache"
"go.uber.org/zap"
"github.com/google/knative-gcp/pkg/utils"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
appsv1listers "k8s.io/client-go/listers/apps/v1"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/reconciler"
tracingconfig "knative.dev/pkg/tracing/config"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
"knative.dev/pkg/resolver"
"github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
listers "github.com/google/knative-gcp/pkg/client/listers/pubsub/v1alpha1"
gpubsub "github.com/google/knative-gcp/pkg/gclient/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub/pullsubscription/resources"
"github.com/google/knative-gcp/pkg/tracing"
)
const (
// Component names for metrics.
sourceComponent = "source"
channelComponent = "channel"
)
// Base implements the core controller logic for pullsubscription.
type Base struct {
*pubsub.PubSubBase
// DeploymentLister index properties about deployments.
DeploymentLister appsv1listers.DeploymentLister
// PullSubscriptionLister index properties about pullsubscriptions.
PullSubscriptionLister listers.PullSubscriptionLister
UriResolver *resolver.URIResolver
ReceiveAdapterImage string
ControllerAgentName string
FinalizerName string
LoggingConfig *logging.Config
MetricsConfig *metrics.ExporterOptions
TracingConfig *tracingconfig.Config
// CreateClientFn is the function used to create the Pub/Sub client that interacts with Pub/Sub.
// This is needed so that we can inject a mock client for UTs purposes.
CreateClientFn gpubsub.CreateFn
// ReconcileDataPlaneFn is the function used to reconcile the data plane resources.
ReconcileDataPlaneFn ReconcileDataPlaneFunc
}
// ReconcileDataPlaneFunc is used to reconcile the data plane component(s).
type ReconcileDataPlaneFunc func(ctx context.Context, d *appsv1.Deployment, ps *v1alpha1.PullSubscription) error
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the PullSubscription resource
// with the current status of the resource.
func (r *Base) Reconcile(ctx context.Context, key string) error {
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid resource key")
return nil
}
// Get the PullSubscription resource with this namespace/name
original, err := r.PullSubscriptionLister.PullSubscriptions(namespace).Get(name)
if apierrs.IsNotFound(err) {
// The resource may no longer exist, in which case we stop processing.
logging.FromContext(ctx).Desugar().Error("PullSubscription in work queue no longer exists")
return nil
} else if err != nil {
return err
}
// Don't modify the informers copy
ps := original.DeepCopy()
// Reconcile this copy of the PullSubscription and then write back any status
// updates regardless of whether the reconciliation errored out.
var reconcileErr = r.reconcile(ctx, ps)
// If no error is returned, mark the observed generation.
// This has to be done before updateStatus is called.
if reconcileErr == nil {
ps.Status.ObservedGeneration = ps.Generation
}
if equality.Semantic.DeepEqual(original.Finalizers, ps.Finalizers) {
// If we didn't change finalizers then don't call updateFinalizers.
} else if _, updated, fErr := r.updateFinalizers(ctx, ps); fErr != nil {
logging.FromContext(ctx).Desugar().Warn("Failed to update PullSubscription finalizers", zap.Error(fErr))
r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update finalizers for PullSubscription %q: %v", ps.Name, fErr)
return fErr
} else if updated {
// There was a difference and updateFinalizers said it updated and did not return an error.
r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q finalizers", ps.Name)
}
if equality.Semantic.DeepEqual(original.Status, ps.Status) {
// If we didn't change anything then don't call updateStatus.
// This is important because the copy we loaded from the informer's
// cache may be stale and we don't want to overwrite a prior update
// to status with this stale state.
} else if uErr := r.updateStatus(ctx, original, ps); uErr != nil {
logging.FromContext(ctx).Desugar().Warn("Failed to update ps status", zap.Error(uErr))
r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update status for PullSubscription %q: %v", ps.Name, uErr)
return uErr
} else if reconcileErr == nil {
// There was a difference and updateStatus did not return an error.
r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q", ps.Name)
}
if reconcileErr != nil {
r.Recorder.Event(ps, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
}
return reconcileErr
}
func (r *Base) reconcile(ctx context.Context, ps *v1alpha1.PullSubscription) error {
ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("pullsubscription", ps)))
ps.Status.InitializeConditions()
if ps.DeletionTimestamp != nil {
logging.FromContext(ctx).Desugar().Debug("Deleting Pub/Sub subscription")
if err := r.deleteSubscription(ctx, ps); err != nil {
ps.Status.MarkNoSubscription("SubscriptionDeleteFailed", "Failed to delete Pub/Sub subscription: %s", err.Error())
return err
}
ps.Status.MarkNoSubscription("SubscriptionDeleted", "Successfully deleted Pub/Sub subscription %q", ps.Status.SubscriptionID)
ps.Status.SubscriptionID = ""
r.removeFinalizer(ps)
return nil
}
// Sink is required.
sinkURI, err := r.resolveDestination(ctx, ps.Spec.Sink, ps)
if err != nil {
ps.Status.MarkNoSink("InvalidSink", err.Error())
return err
} else {
ps.Status.MarkSink(sinkURI)
}
// Transformer is optional.
if ps.Spec.Transformer != nil | else {
// If the transformer is nil, mark is as nil and clean up the URI.
ps.Status.MarkNoTransformer("TransformerNil", "Transformer is nil")
ps.Status.TransformerURI = ""
}
r.addFinalizer(ps)
subscriptionID, err := r.reconcileSubscription(ctx, ps)
if err != nil {
ps.Status.MarkNoSubscription("SubscriptionReconcileFailed", "Failed to reconcile Pub/Sub subscription: %s", err.Error())
return err
}
ps.Status.MarkSubscribed(subscriptionID)
err = r.reconcileDataPlaneResources(ctx, ps, r.ReconcileDataPlaneFn)
if err != nil {
ps.Status.MarkNotDeployed("DataPlaneReconcileFailed", "Failed to reconcile Data Plane resource(s): %s", err.Error())
return err
}
ps.Status.MarkDeployed()
return nil
}
func (r *Base) reconcileSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) (string, error) {
if ps.Status.ProjectID == "" {
projectID, err := utils.ProjectID(ps.Spec.Project)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
return "", err
}
// Set the projectID in the status.
ps.Status.ProjectID = projectID
}
// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
// pointing at a credential file.
client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return "", err
}
defer client.Close()
// Generate the subscription name
subID := resources.GenerateSubscriptionName(ps)
// Load the subscription.
sub := client.Subscription(subID)
subExists, err := sub.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
return "", err
}
t := client.Topic(ps.Spec.Topic)
topicExists, err := t.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
return "", err
}
if !topicExists {
return "", fmt.Errorf("Topic %q does not exist", ps.Spec.Topic)
}
// subConfig is the wanted config based on settings.
subConfig := gpubsub.SubscriptionConfig{
Topic: t,
RetainAckedMessages: ps.Spec.RetainAckedMessages,
}
if ps.Spec.AckDeadline != nil {
ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
return "", fmt.Errorf("invalid ackDeadline: %w", err)
}
subConfig.AckDeadline = ackDeadline
}
if ps.Spec.RetentionDuration != nil {
retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
return "", fmt.Errorf("invalid retentionDuration: %w", err)
}
subConfig.RetentionDuration = retentionDuration
}
// If the subscription doesn't exist, create it.
if !subExists {
// Create a new subscription to the previous topic with the given name.
sub, err = client.CreateSubscription(ctx, subID, subConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
return "", err
}
}
// TODO update the subscription's config if needed.
return subID, nil
}
// deleteSubscription looks at the status.SubscriptionID and if non-empty,
// hence indicating that we have created a subscription successfully
// in the PullSubscription, remove it.
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
if ps.Status.SubscriptionID == "" {
return nil
}
// At this point the project ID should have been populated in the status.
// Querying Pub/Sub as the subscription could have been deleted outside the cluster (e.g, through gcloud).
client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return err
}
defer client.Close()
// Load the subscription.
sub := client.Subscription(ps.Status.SubscriptionID)
exists, err := sub.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
return err
}
if exists {
if err := sub.Delete(ctx); err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
return err
}
}
return nil
}
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
existing := original.DeepCopy()
return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
if attempts > 0 {
existing, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
if err != nil {
return err
}
}
// If there's nothing to update, just return.
if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
return nil
}
becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()
existing.Status = desired.Status
_, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).UpdateStatus(existing)
if err == nil && becomesReady {
// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
logging.FromContext(ctx).Desugar().Info("PullSubscription became ready", zap.Any("after", duration))
r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PullSubscription %q became ready", existing.Name))
if metricErr := r.StatsReporter.ReportReady("PullSubscription", existing.Namespace, existing.Name, duration); metricErr != nil {
logging.FromContext(ctx).Desugar().Error("Failed to record ready for PullSubscription", zap.Error(metricErr))
}
}
return err
})
}
// updateFinalizers is a generic method for future compatibility with a
// reconciler SDK.
func (r *Base) updateFinalizers(ctx context.Context, desired *v1alpha1.PullSubscription) (*v1alpha1.PullSubscription, bool, error) {
source, err := r.PullSubscriptionLister.PullSubscriptions(desired.Namespace).Get(desired.Name)
if err != nil {
return nil, false, err
}
// Don't modify the informers copy.
existing := source.DeepCopy()
var finalizers []string
// If there's nothing to update, just return.
existingFinalizers := sets.NewString(existing.Finalizers...)
desiredFinalizers := sets.NewString(desired.Finalizers...)
if desiredFinalizers.Has(r.FinalizerName) {
if existingFinalizers.Has(r.FinalizerName) {
// Nothing to do.
return desired, false, nil
}
// Add the finalizer.
finalizers = append(existing.Finalizers, r.FinalizerName)
} else {
if !existingFinalizers.Has(r.FinalizerName) {
// Nothing to do.
return desired, false, nil
}
// Remove the finalizer.
existingFinalizers.Delete(r.FinalizerName)
finalizers = existingFinalizers.List()
}
mergePatch := map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": finalizers,
"resourceVersion": existing.ResourceVersion,
},
}
patch, err := json.Marshal(mergePatch)
if err != nil {
return desired, false, err
}
update, err := r.RunClientSet.PubsubV1alpha1().PullSubscriptions(existing.Namespace).Patch(existing.Name, types.MergePatchType, patch)
return update, true, err
}
// addFinalizer ensures r.FinalizerName appears exactly once in the
// PullSubscription's finalizer list (sets.List returns a sorted slice).
func (r *Base) addFinalizer(s *v1alpha1.PullSubscription) {
	s.Finalizers = sets.NewString(s.Finalizers...).Insert(r.FinalizerName).List()
}
// removeFinalizer strips r.FinalizerName from the PullSubscription's
// finalizer list (sets.List returns a sorted slice).
func (r *Base) removeFinalizer(s *v1alpha1.PullSubscription) {
	s.Finalizers = sets.NewString(s.Finalizers...).Delete(r.FinalizerName).List()
}
func (r *Base) reconcileDataPlaneResources(ctx context.Context, src *v1alpha1.PullSubscription, f ReconcileDataPlaneFunc) error {
loggingConfig, err := logging.LoggingConfigToJson(r.LoggingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing existing logging config", zap.Error(err))
}
if r.MetricsConfig != nil {
component := sourceComponent
// Set the metric component based on the channel label.
if _, ok := src.Labels["events.cloud.google.com/channel"]; ok {
component = channelComponent
}
r.MetricsConfig.Component = component
}
metricsConfig, err := metrics.MetricsOptionsToJson(r.MetricsConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing metrics config", zap.Error(err))
}
tracingConfig, err := tracing.ConfigToJSON(r.TracingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing tracing config", zap.Error(err))
}
desired := resources.MakeReceiveAdapter(ctx, &resources.ReceiveAdapterArgs{
Image: r.ReceiveAdapterImage,
Source: src,
Labels: resources.GetLabels(r.ControllerAgentName, src.Name),
SubscriptionID: src.Status.SubscriptionID,
SinkURI: src.Status.SinkURI,
TransformerURI: src.Status.TransformerURI,
LoggingConfig: loggingConfig,
MetricsConfig: metricsConfig,
TracingConfig: tracingConfig,
})
return f(ctx, desired, src)
}
func (r *Base) GetOrCreateReceiveAdapter(ctx context.Context, desired *appsv1.Deployment, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
existing, err := r.getReceiveAdapter(ctx, src)
if err != nil && !apierrors.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Unable to get an existing Receive Adapter", zap.Error(err))
return nil, err
}
if existing == nil {
existing, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Create(desired)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error creating Receive Adapter", zap.Error(err))
return nil, err
}
}
return existing, nil
}
func (r *Base) getReceiveAdapter(ctx context.Context, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
dl, err := r.KubeClientSet.AppsV1().Deployments(src.Namespace).List(metav1.ListOptions{
LabelSelector: resources.GetLabelSelector(r.ControllerAgentName, src.Name).String(),
TypeMeta: metav1.TypeMeta{
APIVersion: appsv1.SchemeGroupVersion.String(),
Kind: "Deployment",
},
})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Unable to list deployments", zap.Error(err))
return nil, err
}
for _, dep := range dl.Items {
if metav1.IsControlledBy(&dep, src) {
return &dep, nil
}
}
return nil, apierrors.NewNotFound(schema.GroupResource{}, "")
}
func (r *Base) UpdateFromLoggingConfigMap(cfg *corev1.ConfigMap) {
if cfg != nil {
delete(cfg.Data, "_example")
}
logcfg, err := logging.NewConfigFromConfigMap(cfg)
if err != nil {
r.Logger.Warnw("Failed to create logging config from configmap", zap.String("cfg.Name", cfg.Name))
return
}
r.LoggingConfig = logcfg
r.Logger.Debugw("Update from logging ConfigMap", zap.Any("loggingCfg", cfg))
// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
func (r *Base) UpdateFromMetricsConfigMap(cfg *corev1.ConfigMap) {
if cfg != nil {
delete(cfg.Data, "_example")
}
// Cannot set the component here as we don't know if its a source or a channel.
// Will set that up dynamically before creating the receive adapter.
// Won't be able to requeue the PullSubscriptions.
r.MetricsConfig = &metrics.ExporterOptions{
Domain: metrics.Domain(),
ConfigMap: cfg.Data,
}
r.Logger.Debugw("Update from metrics ConfigMap", zap.Any("metricsCfg", cfg))
}
func (r *Base) UpdateFromTracingConfigMap(cfg *corev1.ConfigMap) {
if cfg == nil {
r.Logger.Error("Tracing ConfigMap is nil")
return
}
delete(cfg.Data, "_example")
tracingCfg, err := tracingconfig.NewTracingConfigFromConfigMap(cfg)
if err != nil {
r.Logger.Warnw("Failed to create tracing config from configmap", zap.String("cfg.Name", cfg.Name))
return
}
r.TracingConfig = tracingCfg
r.Logger.Debugw("Updated Tracing config", zap.Any("tracingCfg", r.TracingConfig))
// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
func (r *Base) resolveDestination(ctx context.Context, destination duckv1.Destination, ps *v1alpha1.PullSubscription) (string, error) {
// Setting up the namespace.
if destination.Ref != nil {
destination.Ref.Namespace = ps.Namespace
}
url, err := r.UriResolver.URIFromDestinationV1(destination, ps)
if err != nil {
return "", err
}
return url.String(), nil
}
| {
transformerURI, err := r.resolveDestination(ctx, *ps.Spec.Transformer, ps)
if err != nil {
ps.Status.MarkNoTransformer("InvalidTransformer", err.Error())
} else {
ps.Status.MarkTransformer(transformerURI)
}
} | conditional_block |
reconciler.go | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullsubscription
import (
"context"
"encoding/json"
"fmt"
"time"
"k8s.io/client-go/tools/cache"
"go.uber.org/zap"
"github.com/google/knative-gcp/pkg/utils"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
appsv1listers "k8s.io/client-go/listers/apps/v1"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/reconciler"
tracingconfig "knative.dev/pkg/tracing/config"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
"knative.dev/pkg/resolver"
"github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
listers "github.com/google/knative-gcp/pkg/client/listers/pubsub/v1alpha1"
gpubsub "github.com/google/knative-gcp/pkg/gclient/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub"
"github.com/google/knative-gcp/pkg/reconciler/pubsub/pullsubscription/resources"
"github.com/google/knative-gcp/pkg/tracing"
)
const (
// Component names for metrics.
sourceComponent = "source"
channelComponent = "channel"
)
// Base implements the core controller logic for pullsubscription.
type Base struct {
*pubsub.PubSubBase
// DeploymentLister index properties about deployments.
DeploymentLister appsv1listers.DeploymentLister
// PullSubscriptionLister index properties about pullsubscriptions.
PullSubscriptionLister listers.PullSubscriptionLister
UriResolver *resolver.URIResolver
ReceiveAdapterImage string
ControllerAgentName string
FinalizerName string
LoggingConfig *logging.Config
MetricsConfig *metrics.ExporterOptions
TracingConfig *tracingconfig.Config
// CreateClientFn is the function used to create the Pub/Sub client that interacts with Pub/Sub.
// This is needed so that we can inject a mock client for UTs purposes.
CreateClientFn gpubsub.CreateFn
// ReconcileDataPlaneFn is the function used to reconcile the data plane resources.
ReconcileDataPlaneFn ReconcileDataPlaneFunc
}
// ReconcileDataPlaneFunc is used to reconcile the data plane component(s).
type ReconcileDataPlaneFunc func(ctx context.Context, d *appsv1.Deployment, ps *v1alpha1.PullSubscription) error
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the PullSubscription resource
// with the current status of the resource.
func (r *Base) Reconcile(ctx context.Context, key string) error {
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid resource key")
return nil
}
// Get the PullSubscription resource with this namespace/name
original, err := r.PullSubscriptionLister.PullSubscriptions(namespace).Get(name)
if apierrs.IsNotFound(err) {
// The resource may no longer exist, in which case we stop processing.
logging.FromContext(ctx).Desugar().Error("PullSubscription in work queue no longer exists")
return nil
} else if err != nil {
return err
}
// Don't modify the informers copy
ps := original.DeepCopy()
// Reconcile this copy of the PullSubscription and then write back any status
// updates regardless of whether the reconciliation errored out.
var reconcileErr = r.reconcile(ctx, ps)
// If no error is returned, mark the observed generation.
// This has to be done before updateStatus is called.
if reconcileErr == nil {
ps.Status.ObservedGeneration = ps.Generation
}
if equality.Semantic.DeepEqual(original.Finalizers, ps.Finalizers) {
// If we didn't change finalizers then don't call updateFinalizers.
} else if _, updated, fErr := r.updateFinalizers(ctx, ps); fErr != nil {
logging.FromContext(ctx).Desugar().Warn("Failed to update PullSubscription finalizers", zap.Error(fErr))
r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update finalizers for PullSubscription %q: %v", ps.Name, fErr)
return fErr
} else if updated {
// There was a difference and updateFinalizers said it updated and did not return an error.
r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q finalizers", ps.Name)
}
if equality.Semantic.DeepEqual(original.Status, ps.Status) {
// If we didn't change anything then don't call updateStatus.
// This is important because the copy we loaded from the informer's
// cache may be stale and we don't want to overwrite a prior update
// to status with this stale state.
} else if uErr := r.updateStatus(ctx, original, ps); uErr != nil {
logging.FromContext(ctx).Desugar().Warn("Failed to update ps status", zap.Error(uErr))
r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update status for PullSubscription %q: %v", ps.Name, uErr)
return uErr
} else if reconcileErr == nil {
// There was a difference and updateStatus did not return an error.
r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q", ps.Name)
}
if reconcileErr != nil {
r.Recorder.Event(ps, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
}
return reconcileErr
}
func (r *Base) reconcile(ctx context.Context, ps *v1alpha1.PullSubscription) error {
ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("pullsubscription", ps)))
ps.Status.InitializeConditions()
if ps.DeletionTimestamp != nil {
logging.FromContext(ctx).Desugar().Debug("Deleting Pub/Sub subscription")
if err := r.deleteSubscription(ctx, ps); err != nil {
ps.Status.MarkNoSubscription("SubscriptionDeleteFailed", "Failed to delete Pub/Sub subscription: %s", err.Error())
return err
}
ps.Status.MarkNoSubscription("SubscriptionDeleted", "Successfully deleted Pub/Sub subscription %q", ps.Status.SubscriptionID)
ps.Status.SubscriptionID = ""
r.removeFinalizer(ps)
return nil
}
// Sink is required.
sinkURI, err := r.resolveDestination(ctx, ps.Spec.Sink, ps)
if err != nil {
ps.Status.MarkNoSink("InvalidSink", err.Error())
return err
} else {
ps.Status.MarkSink(sinkURI)
}
// Transformer is optional.
if ps.Spec.Transformer != nil {
transformerURI, err := r.resolveDestination(ctx, *ps.Spec.Transformer, ps)
if err != nil {
ps.Status.MarkNoTransformer("InvalidTransformer", err.Error())
} else {
ps.Status.MarkTransformer(transformerURI)
}
} else {
// If the transformer is nil, mark is as nil and clean up the URI.
ps.Status.MarkNoTransformer("TransformerNil", "Transformer is nil")
ps.Status.TransformerURI = ""
}
r.addFinalizer(ps)
subscriptionID, err := r.reconcileSubscription(ctx, ps)
if err != nil {
ps.Status.MarkNoSubscription("SubscriptionReconcileFailed", "Failed to reconcile Pub/Sub subscription: %s", err.Error())
return err
}
ps.Status.MarkSubscribed(subscriptionID)
err = r.reconcileDataPlaneResources(ctx, ps, r.ReconcileDataPlaneFn)
if err != nil {
ps.Status.MarkNotDeployed("DataPlaneReconcileFailed", "Failed to reconcile Data Plane resource(s): %s", err.Error())
return err
}
ps.Status.MarkDeployed()
return nil
}
func (r *Base) reconcileSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) (string, error) {
if ps.Status.ProjectID == "" {
projectID, err := utils.ProjectID(ps.Spec.Project)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
return "", err
}
// Set the projectID in the status.
ps.Status.ProjectID = projectID
}
// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
// pointing at a credential file.
client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return "", err
}
defer client.Close()
// Generate the subscription name
subID := resources.GenerateSubscriptionName(ps)
// Load the subscription.
sub := client.Subscription(subID)
subExists, err := sub.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
return "", err
}
t := client.Topic(ps.Spec.Topic)
topicExists, err := t.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
return "", err
}
if !topicExists {
return "", fmt.Errorf("Topic %q does not exist", ps.Spec.Topic)
}
// subConfig is the wanted config based on settings.
subConfig := gpubsub.SubscriptionConfig{
Topic: t,
RetainAckedMessages: ps.Spec.RetainAckedMessages,
}
if ps.Spec.AckDeadline != nil {
ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
return "", fmt.Errorf("invalid ackDeadline: %w", err)
}
subConfig.AckDeadline = ackDeadline
}
if ps.Spec.RetentionDuration != nil {
retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
return "", fmt.Errorf("invalid retentionDuration: %w", err)
}
subConfig.RetentionDuration = retentionDuration
}
// If the subscription doesn't exist, create it.
if !subExists {
// Create a new subscription to the previous topic with the given name.
sub, err = client.CreateSubscription(ctx, subID, subConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
return "", err
}
}
// TODO update the subscription's config if needed.
return subID, nil
}
// deleteSubscription looks at the status.SubscriptionID and if non-empty,
// hence indicating that we have created a subscription successfully
// in the PullSubscription, remove it.
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
if ps.Status.SubscriptionID == "" {
return nil
}
// At this point the project ID should have been populated in the status.
// Querying Pub/Sub as the subscription could have been deleted outside the cluster (e.g, through gcloud).
client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return err
}
defer client.Close()
// Load the subscription.
sub := client.Subscription(ps.Status.SubscriptionID)
exists, err := sub.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
return err
}
if exists {
if err := sub.Delete(ctx); err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
return err
}
}
return nil
}
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
existing := original.DeepCopy()
return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
if attempts > 0 {
existing, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
if err != nil {
return err
}
}
// If there's nothing to update, just return.
if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
return nil
}
becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()
existing.Status = desired.Status
_, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).UpdateStatus(existing)
if err == nil && becomesReady {
// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
logging.FromContext(ctx).Desugar().Info("PullSubscription became ready", zap.Any("after", duration))
r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PullSubscription %q became ready", existing.Name))
if metricErr := r.StatsReporter.ReportReady("PullSubscription", existing.Namespace, existing.Name, duration); metricErr != nil {
logging.FromContext(ctx).Desugar().Error("Failed to record ready for PullSubscription", zap.Error(metricErr))
} | return err
})
}
// updateFinalizers is a generic method for future compatibility with a
// reconciler SDK.
func (r *Base) updateFinalizers(ctx context.Context, desired *v1alpha1.PullSubscription) (*v1alpha1.PullSubscription, bool, error) {
source, err := r.PullSubscriptionLister.PullSubscriptions(desired.Namespace).Get(desired.Name)
if err != nil {
return nil, false, err
}
// Don't modify the informers copy.
existing := source.DeepCopy()
var finalizers []string
// If there's nothing to update, just return.
existingFinalizers := sets.NewString(existing.Finalizers...)
desiredFinalizers := sets.NewString(desired.Finalizers...)
if desiredFinalizers.Has(r.FinalizerName) {
if existingFinalizers.Has(r.FinalizerName) {
// Nothing to do.
return desired, false, nil
}
// Add the finalizer.
finalizers = append(existing.Finalizers, r.FinalizerName)
} else {
if !existingFinalizers.Has(r.FinalizerName) {
// Nothing to do.
return desired, false, nil
}
// Remove the finalizer.
existingFinalizers.Delete(r.FinalizerName)
finalizers = existingFinalizers.List()
}
mergePatch := map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": finalizers,
"resourceVersion": existing.ResourceVersion,
},
}
patch, err := json.Marshal(mergePatch)
if err != nil {
return desired, false, err
}
update, err := r.RunClientSet.PubsubV1alpha1().PullSubscriptions(existing.Namespace).Patch(existing.Name, types.MergePatchType, patch)
return update, true, err
}
func (r *Base) addFinalizer(s *v1alpha1.PullSubscription) {
finalizers := sets.NewString(s.Finalizers...)
finalizers.Insert(r.FinalizerName)
s.Finalizers = finalizers.List()
}
func (r *Base) removeFinalizer(s *v1alpha1.PullSubscription) {
finalizers := sets.NewString(s.Finalizers...)
finalizers.Delete(r.FinalizerName)
s.Finalizers = finalizers.List()
}
func (r *Base) reconcileDataPlaneResources(ctx context.Context, src *v1alpha1.PullSubscription, f ReconcileDataPlaneFunc) error {
loggingConfig, err := logging.LoggingConfigToJson(r.LoggingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing existing logging config", zap.Error(err))
}
if r.MetricsConfig != nil {
component := sourceComponent
// Set the metric component based on the channel label.
if _, ok := src.Labels["events.cloud.google.com/channel"]; ok {
component = channelComponent
}
r.MetricsConfig.Component = component
}
metricsConfig, err := metrics.MetricsOptionsToJson(r.MetricsConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing metrics config", zap.Error(err))
}
tracingConfig, err := tracing.ConfigToJSON(r.TracingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing tracing config", zap.Error(err))
}
desired := resources.MakeReceiveAdapter(ctx, &resources.ReceiveAdapterArgs{
Image: r.ReceiveAdapterImage,
Source: src,
Labels: resources.GetLabels(r.ControllerAgentName, src.Name),
SubscriptionID: src.Status.SubscriptionID,
SinkURI: src.Status.SinkURI,
TransformerURI: src.Status.TransformerURI,
LoggingConfig: loggingConfig,
MetricsConfig: metricsConfig,
TracingConfig: tracingConfig,
})
return f(ctx, desired, src)
}
func (r *Base) GetOrCreateReceiveAdapter(ctx context.Context, desired *appsv1.Deployment, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
existing, err := r.getReceiveAdapter(ctx, src)
if err != nil && !apierrors.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Unable to get an existing Receive Adapter", zap.Error(err))
return nil, err
}
if existing == nil {
existing, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Create(desired)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error creating Receive Adapter", zap.Error(err))
return nil, err
}
}
return existing, nil
}
func (r *Base) getReceiveAdapter(ctx context.Context, src *v1alpha1.PullSubscription) (*appsv1.Deployment, error) {
dl, err := r.KubeClientSet.AppsV1().Deployments(src.Namespace).List(metav1.ListOptions{
LabelSelector: resources.GetLabelSelector(r.ControllerAgentName, src.Name).String(),
TypeMeta: metav1.TypeMeta{
APIVersion: appsv1.SchemeGroupVersion.String(),
Kind: "Deployment",
},
})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Unable to list deployments", zap.Error(err))
return nil, err
}
for _, dep := range dl.Items {
if metav1.IsControlledBy(&dep, src) {
return &dep, nil
}
}
return nil, apierrors.NewNotFound(schema.GroupResource{}, "")
}
func (r *Base) UpdateFromLoggingConfigMap(cfg *corev1.ConfigMap) {
if cfg != nil {
delete(cfg.Data, "_example")
}
logcfg, err := logging.NewConfigFromConfigMap(cfg)
if err != nil {
r.Logger.Warnw("Failed to create logging config from configmap", zap.String("cfg.Name", cfg.Name))
return
}
r.LoggingConfig = logcfg
r.Logger.Debugw("Update from logging ConfigMap", zap.Any("loggingCfg", cfg))
// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
func (r *Base) UpdateFromMetricsConfigMap(cfg *corev1.ConfigMap) {
if cfg != nil {
delete(cfg.Data, "_example")
}
// Cannot set the component here as we don't know if its a source or a channel.
// Will set that up dynamically before creating the receive adapter.
// Won't be able to requeue the PullSubscriptions.
r.MetricsConfig = &metrics.ExporterOptions{
Domain: metrics.Domain(),
ConfigMap: cfg.Data,
}
r.Logger.Debugw("Update from metrics ConfigMap", zap.Any("metricsCfg", cfg))
}
func (r *Base) UpdateFromTracingConfigMap(cfg *corev1.ConfigMap) {
if cfg == nil {
r.Logger.Error("Tracing ConfigMap is nil")
return
}
delete(cfg.Data, "_example")
tracingCfg, err := tracingconfig.NewTracingConfigFromConfigMap(cfg)
if err != nil {
r.Logger.Warnw("Failed to create tracing config from configmap", zap.String("cfg.Name", cfg.Name))
return
}
r.TracingConfig = tracingCfg
r.Logger.Debugw("Updated Tracing config", zap.Any("tracingCfg", r.TracingConfig))
// TODO: requeue all PullSubscriptions. See https://github.com/google/knative-gcp/issues/457.
}
func (r *Base) resolveDestination(ctx context.Context, destination duckv1.Destination, ps *v1alpha1.PullSubscription) (string, error) {
// Setting up the namespace.
if destination.Ref != nil {
destination.Ref.Namespace = ps.Namespace
}
url, err := r.UriResolver.URIFromDestinationV1(destination, ps)
if err != nil {
return "", err
}
return url.String(), nil
} | }
| random_line_split |
utils.py |
from quantum_routines import (generate_empty_initial_state,
generate_mixing_Ham, generate_Ham_from_graph)
from qutip import sesolve, sigmaz, sigmap, qeye, tensor, Options
import settings
import numpy as np
import math
import numba
from tqdm.auto import tqdm, trange
from operator import itemgetter
settings.init()
def generate_signal_fourier(G, rot_init=settings.rot_init,
N_sample=1000, hamiltonian='xy',
tf=100*math.pi):
"""
Function to return the Fourier transform of the average number of
excitation signal
Arguments:
---------
- G: networx.Graph, graph to analyze
- rot_init: float, initial rotation
- N_sample: int, number of timesteps to compute the evolution
- hamiltonian: str 'xy' or 'ising', type of hamiltonian to simulate
- tf: float, total time of evolution
Returns:
--------
- plap_fft: numpy.Ndarray, shape (N_sample,) values of the fourier spectra
- freq_normalized: numpy.Ndarray, shape (N_sample,) values of the
fequencies
"""
assert hamiltonian in ['ising', 'xy']
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=hamiltonian)
rotation_angle_single_exc = rot_init/2.
tlist = np.linspace(0, rotation_angle_single_exc, 200)
psi_0 = generate_empty_initial_state(N_nodes)
H_m = generate_mixing_Ham(N_nodes)
result = sesolve(H_m, psi_0, tlist)
final_state = result.states[-1]
sz = sigmaz()
si = qeye(2)
sp = sigmap()
sz_list = []
sp_list = []
for j in range(N_nodes):
op_list = [si for _ in range(N_nodes)]
op_list[j] = sz
sz_list.append(tensor(op_list))
op_list[j] = sp
sp_list.append(tensor(op_list))
tlist = np.linspace(0, tf, N_sample)
observable = (-2*math.sin(2*rotation_angle_single_exc)
* sum(spj for spj in sp_list)
+ math.cos(2*rotation_angle_single_exc)
* sum(szj for szj in sz_list))
opts = Options()
opts.store_states = True
result = sesolve(H_evol, final_state, tlist,
e_ops=[observable], options=opts)
full_signal = result.expect
signal = full_signal[0].real
signal_fft = np.fft.fft(signal)
freq = np.fft.fftfreq(signal.shape[-1])
freq_normalized = np.abs(freq * N_sample * 2) / (tf / np.pi)
return signal_fft, freq_normalized
@numba.njit
def entropy(p):
|
@numba.njit
def jensen_shannon(hist1, hist2):
'''
Returns the Jensen Shannon divergence between two probabilities
distribution represented as histograms.
Arguments:
---------
- hist1: tuple of numpy.ndarray (density, bins),
len(bins) = len(density) + 1.
The integral of the density wrt bins sums to 1.
- hist2: same format.
Returns:
--------
- float, value of the Jensen Shannon divergence.
'''
bins = np.sort(np.unique(np.array(list(hist1[1]) + list(hist2[1]))))
masses1 = []
masses2 = []
for i, b in enumerate(bins[1::]):
if b <= hist1[1][0]:
masses1.append(0.)
elif b > hist1[1][-1]:
masses1.append(0.)
else:
j = 0
while b > hist1[1][j]:
j += 1
masses1.append((b-bins[i]) * hist1[0][j-1])
if b <= hist2[1][0]:
masses2.append(0.)
elif b > hist2[1][-1]:
masses2.append(0.)
else:
j = 0
while b > hist2[1][j]:
j += 1
masses2.append((b-bins[i]) * hist2[0][j-1])
masses1 = np.array(masses1)
masses2 = np.array(masses2)
masses12 = (masses1+masses2)/2
return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
"""
Returns the fourier transform of evolution for a list of graphs for
the hamiltonian ising and xy.
Arguments:
---------
- graph_list: list or numpy.Ndarray of networkx.Graph objects
Returns:
--------
- fs_xy: numpy.Ndarray of shape (2, len(graph_list), 1000)
[0,i]: Fourier signal of graph i at 1000 points for
hamiltonian XY
[1,i]: frequencies associated to graph i at 1000 points
for hamiltonian XY
- fs_is: same for the Ising hamiltonian
"""
fs_xy = np.zeros((2, len(graph_list), 1000))
fs_is = np.zeros((2, len(graph_list), 1000))
for i, graph in enumerate(graph_list):
fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='xy')
fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='ising')
return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns:
--------
- all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns of zeros for missing energies.
Arguments:
---------
- target_energies: numpy.ndarray of shape (N_dim, ) target energies
- energies: numpy.ndarray of shape (N_dim_init, ) energies of distributions
- masses: numpy.ndarray of shape (N, N_dim_init) discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N, N_dim)
all extended discrete probability distributions
"""
energies = list(energies)
N = masses.shape[0]
res = np.zeros((N, len(target_energies)))
for i, energy in enumerate(target_energies):
if energy not in energies:
res[:, i] = np.zeros((N, ))
else:
res[:, i] = masses[:, energies.index(energy)]
return res
def merge_energies(e1, m1, e2, m2):
"""
Merge the arrays of energy masses, filling with zeros the missing energies in each.
N_dim is the size of the union of the energies from the two distributions.
Arguments:
---------
- e1: numpy.ndarray of shape (N_dim1, ) energies of first distributions
- m1: numpy.ndarray of shape (N1, N_dim1) first discrete probability distributions
- e2: numpy.ndarray of shape (N_dim2, ) energies of first distributions
- m2: numpy.ndarray of shape (N2, N_dim2) first discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N1, N_dim)
all extended first discrete probability distributions
- numpy.ndarray of shape (N2, N_dim)
all extended second discrete probability distributions
"""
e = sorted(list(set(e1) | set(e2)))
return extend_energies(e, e1, m1), extend_energies(e, e2, m2)
def return_js_square_matrix(distributions, verbose=0):
"""
Returns the Jensen-Shannon distance matrix of discrete
distributions.
Arguments:
---------
- distributions: numpy.ndarray of shape (N_sample, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
js_matrix = np.zeros((len(distributions), len(distributions)))
for i in range(len(distributions)):
for j in range(i + 1):
masses1 = distributions[i]
masses2 = distributions[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
js_matrix[j, i] = js
return js_matrix
def return_js_matrix(distributions1, distributions2, verbose=0):
"""
Returns the Jensen-Shannon distance matrix between discrete
distributions.
Arguments:
---------
- distributions1: numpy.ndarray of shape (N_samples_1, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
- distributions2: numpy.ndarray of shape (N_samples_2, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
assert distributions1.shape[1] == distributions2.shape[1], \
"Distributions must have matching dimensions. Consider using merge_energies"
js_matrix = np.zeros((len(distributions1), len(distributions2)))
for i in trange(len(distributions1), desc='dist1 loop', disable=verbose<=0):
for j in trange(len(distributions2), desc='dist2 loop', disable=verbose<=1):
masses1 = distributions1[i]
masses2 = distributions2[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
return js_matrix
class Memoizer:
"""
Will store results of the provided observable on graphs to avoid recomputing.
Storage is based on a key computed using get_key
Attributes:
-----------
- observable: function(networkx.Graph):
return qtip.Qobj diagonal observable
- get_key: function(networkx.Graph):
return a key used to identify the graph
"""
def __init__(self, observable, get_key=None):
self.graphs = {}
self.observable = observable
self.get_key = get_key if get_key is not None else Memoizer.edges_key
@staticmethod
def edges_unique_key(graph):
"""
Key insensitive to how edges of the graph are returned
(order of edges and order of nodes in edges).
Same result for [(a, b), (c, d)] and [(d, c), (a, b)]
"""
edges = list(map(sorted, graph.edges))
return tuple(map(tuple, sorted(edges, key=itemgetter(0,1))))
@staticmethod
def edges_key(graph):
""" Simple key based on the edges list """
return tuple(graph.edges())
def get_observable(self, graph):
"""
Gets observable on graph
Uses memoization to speed up the process if graph has been seen before
Arguments:
---------
- graph: networkx.Graph to get observable on
Returns:
--------
- qtip.Qobj, diagonal observable
"""
key = self.get_key(graph)
if key not in self.graphs:
self.graphs[key] = self.observable(graph)
return self.graphs[key]
| """
Returns the entropy of a discrete distribution p
Arguments:
---------
- p: numpy.Ndarray dimension 1 non-negative floats summing to 1
Returns:
--------
- float, value of the entropy
"""
assert (p >= 0).all()
assert abs(np.sum(p)-1) < 1e-6
return -np.sum(p*np.log(p+1e-12)) | identifier_body |
utils.py |
from quantum_routines import (generate_empty_initial_state,
generate_mixing_Ham, generate_Ham_from_graph)
from qutip import sesolve, sigmaz, sigmap, qeye, tensor, Options
import settings
import numpy as np
import math
import numba
from tqdm.auto import tqdm, trange
from operator import itemgetter
settings.init()
def generate_signal_fourier(G, rot_init=settings.rot_init,
N_sample=1000, hamiltonian='xy',
tf=100*math.pi):
"""
Function to return the Fourier transform of the average number of
excitation signal
Arguments:
---------
- G: networx.Graph, graph to analyze
- rot_init: float, initial rotation
- N_sample: int, number of timesteps to compute the evolution
- hamiltonian: str 'xy' or 'ising', type of hamiltonian to simulate
- tf: float, total time of evolution
Returns:
--------
- plap_fft: numpy.Ndarray, shape (N_sample,) values of the fourier spectra
- freq_normalized: numpy.Ndarray, shape (N_sample,) values of the
fequencies
"""
assert hamiltonian in ['ising', 'xy']
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=hamiltonian)
rotation_angle_single_exc = rot_init/2.
tlist = np.linspace(0, rotation_angle_single_exc, 200)
psi_0 = generate_empty_initial_state(N_nodes)
H_m = generate_mixing_Ham(N_nodes)
result = sesolve(H_m, psi_0, tlist)
final_state = result.states[-1]
sz = sigmaz()
si = qeye(2)
sp = sigmap()
sz_list = []
sp_list = []
for j in range(N_nodes):
op_list = [si for _ in range(N_nodes)]
op_list[j] = sz
sz_list.append(tensor(op_list))
op_list[j] = sp
sp_list.append(tensor(op_list))
tlist = np.linspace(0, tf, N_sample)
observable = (-2*math.sin(2*rotation_angle_single_exc)
* sum(spj for spj in sp_list)
+ math.cos(2*rotation_angle_single_exc)
* sum(szj for szj in sz_list))
opts = Options()
opts.store_states = True
result = sesolve(H_evol, final_state, tlist,
e_ops=[observable], options=opts)
full_signal = result.expect
signal = full_signal[0].real
signal_fft = np.fft.fft(signal)
freq = np.fft.fftfreq(signal.shape[-1])
freq_normalized = np.abs(freq * N_sample * 2) / (tf / np.pi)
return signal_fft, freq_normalized
@numba.njit
def entropy(p):
"""
Returns the entropy of a discrete distribution p
Arguments:
---------
- p: numpy.Ndarray dimension 1 non-negative floats summing to 1
Returns:
--------
- float, value of the entropy
"""
assert (p >= 0).all()
assert abs(np.sum(p)-1) < 1e-6
return -np.sum(p*np.log(p+1e-12))
@numba.njit
def jensen_shannon(hist1, hist2):
'''
Returns the Jensen Shannon divergence between two probabilities
distribution represented as histograms.
Arguments:
---------
- hist1: tuple of numpy.ndarray (density, bins),
len(bins) = len(density) + 1.
The integral of the density wrt bins sums to 1.
- hist2: same format.
Returns:
--------
- float, value of the Jensen Shannon divergence.
'''
bins = np.sort(np.unique(np.array(list(hist1[1]) + list(hist2[1]))))
masses1 = []
masses2 = []
for i, b in enumerate(bins[1::]):
if b <= hist1[1][0]:
masses1.append(0.)
elif b > hist1[1][-1]:
masses1.append(0.)
else:
j = 0
while b > hist1[1][j]:
j += 1
masses1.append((b-bins[i]) * hist1[0][j-1])
if b <= hist2[1][0]:
masses2.append(0.)
elif b > hist2[1][-1]:
masses2.append(0.)
else:
j = 0
while b > hist2[1][j]:
j += 1
masses2.append((b-bins[i]) * hist2[0][j-1])
masses1 = np.array(masses1)
masses2 = np.array(masses2)
masses12 = (masses1+masses2)/2
return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
"""
Returns the fourier transform of evolution for a list of graphs for
the hamiltonian ising and xy.
Arguments:
---------
- graph_list: list or numpy.Ndarray of networkx.Graph objects
Returns:
--------
- fs_xy: numpy.Ndarray of shape (2, len(graph_list), 1000)
[0,i]: Fourier signal of graph i at 1000 points for
hamiltonian XY
[1,i]: frequencies associated to graph i at 1000 points
for hamiltonian XY
- fs_is: same for the Ising hamiltonian
"""
fs_xy = np.zeros((2, len(graph_list), 1000))
fs_is = np.zeros((2, len(graph_list), 1000))
for i, graph in enumerate(graph_list):
fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='xy')
fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='ising')
return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns:
--------
- all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns of zeros for missing energies.
Arguments:
---------
- target_energies: numpy.ndarray of shape (N_dim, ) target energies
- energies: numpy.ndarray of shape (N_dim_init, ) energies of distributions
- masses: numpy.ndarray of shape (N, N_dim_init) discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N, N_dim)
all extended discrete probability distributions
"""
energies = list(energies)
N = masses.shape[0]
res = np.zeros((N, len(target_energies)))
for i, energy in enumerate(target_energies):
if energy not in energies:
res[:, i] = np.zeros((N, ))
else:
res[:, i] = masses[:, energies.index(energy)]
return res
def merge_energies(e1, m1, e2, m2):
"""
Merge the arrays of energy masses, filling with zeros the missing energies in each.
N_dim is the size of the union of the energies from the two distributions.
Arguments:
---------
- e1: numpy.ndarray of shape (N_dim1, ) energies of first distributions
- m1: numpy.ndarray of shape (N1, N_dim1) first discrete probability distributions
- e2: numpy.ndarray of shape (N_dim2, ) energies of first distributions
- m2: numpy.ndarray of shape (N2, N_dim2) first discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N1, N_dim)
all extended first discrete probability distributions
- numpy.ndarray of shape (N2, N_dim)
all extended second discrete probability distributions
"""
e = sorted(list(set(e1) | set(e2)))
return extend_energies(e, e1, m1), extend_energies(e, e2, m2)
def return_js_square_matrix(distributions, verbose=0):
"""
Returns the Jensen-Shannon distance matrix of discrete
distributions.
Arguments:
---------
- distributions: numpy.ndarray of shape (N_sample, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
js_matrix = np.zeros((len(distributions), len(distributions)))
for i in range(len(distributions)):
for j in range(i + 1):
masses1 = distributions[i]
masses2 = distributions[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
js_matrix[j, i] = js
return js_matrix
def return_js_matrix(distributions1, distributions2, verbose=0):
"""
Returns the Jensen-Shannon distance matrix between discrete
distributions.
Arguments:
---------
- distributions1: numpy.ndarray of shape (N_samples_1, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
- distributions2: numpy.ndarray of shape (N_samples_2, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
assert distributions1.shape[1] == distributions2.shape[1], \
"Distributions must have matching dimensions. Consider using merge_energies"
js_matrix = np.zeros((len(distributions1), len(distributions2)))
for i in trange(len(distributions1), desc='dist1 loop', disable=verbose<=0):
for j in trange(len(distributions2), desc='dist2 loop', disable=verbose<=1):
|
return js_matrix
class Memoizer:
"""
Will store results of the provided observable on graphs to avoid recomputing.
Storage is based on a key computed using get_key
Attributes:
-----------
- observable: function(networkx.Graph):
return qtip.Qobj diagonal observable
- get_key: function(networkx.Graph):
return a key used to identify the graph
"""
def __init__(self, observable, get_key=None):
self.graphs = {}
self.observable = observable
self.get_key = get_key if get_key is not None else Memoizer.edges_key
@staticmethod
def edges_unique_key(graph):
"""
Key insensitive to how edges of the graph are returned
(order of edges and order of nodes in edges).
Same result for [(a, b), (c, d)] and [(d, c), (a, b)]
"""
edges = list(map(sorted, graph.edges))
return tuple(map(tuple, sorted(edges, key=itemgetter(0,1))))
@staticmethod
def edges_key(graph):
""" Simple key based on the edges list """
return tuple(graph.edges())
def get_observable(self, graph):
"""
Gets observable on graph
Uses memoization to speed up the process if graph has been seen before
Arguments:
---------
- graph: networkx.Graph to get observable on
Returns:
--------
- qtip.Qobj, diagonal observable
"""
key = self.get_key(graph)
if key not in self.graphs:
self.graphs[key] = self.observable(graph)
return self.graphs[key]
| masses1 = distributions1[i]
masses2 = distributions2[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js | conditional_block |
utils.py | from quantum_routines import (generate_empty_initial_state,
generate_mixing_Ham, generate_Ham_from_graph)
from qutip import sesolve, sigmaz, sigmap, qeye, tensor, Options
import settings
import numpy as np
import math
import numba
from tqdm.auto import tqdm, trange
from operator import itemgetter
settings.init()
def generate_signal_fourier(G, rot_init=settings.rot_init,
N_sample=1000, hamiltonian='xy',
tf=100*math.pi):
"""
Function to return the Fourier transform of the average number of
excitation signal
Arguments:
---------
- G: networx.Graph, graph to analyze
- rot_init: float, initial rotation
- N_sample: int, number of timesteps to compute the evolution
- hamiltonian: str 'xy' or 'ising', type of hamiltonian to simulate
- tf: float, total time of evolution
Returns:
--------
- plap_fft: numpy.Ndarray, shape (N_sample,) values of the fourier spectra
- freq_normalized: numpy.Ndarray, shape (N_sample,) values of the
fequencies
"""
assert hamiltonian in ['ising', 'xy']
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=hamiltonian)
rotation_angle_single_exc = rot_init/2.
tlist = np.linspace(0, rotation_angle_single_exc, 200)
psi_0 = generate_empty_initial_state(N_nodes)
H_m = generate_mixing_Ham(N_nodes)
result = sesolve(H_m, psi_0, tlist)
final_state = result.states[-1]
sz = sigmaz()
si = qeye(2)
sp = sigmap()
sz_list = []
sp_list = []
for j in range(N_nodes):
op_list = [si for _ in range(N_nodes)]
op_list[j] = sz
sz_list.append(tensor(op_list))
op_list[j] = sp
sp_list.append(tensor(op_list))
tlist = np.linspace(0, tf, N_sample)
observable = (-2*math.sin(2*rotation_angle_single_exc)
* sum(spj for spj in sp_list)
+ math.cos(2*rotation_angle_single_exc)
* sum(szj for szj in sz_list))
opts = Options()
opts.store_states = True
result = sesolve(H_evol, final_state, tlist,
e_ops=[observable], options=opts)
full_signal = result.expect
signal = full_signal[0].real
signal_fft = np.fft.fft(signal)
freq = np.fft.fftfreq(signal.shape[-1])
freq_normalized = np.abs(freq * N_sample * 2) / (tf / np.pi)
return signal_fft, freq_normalized
@numba.njit
def entropy(p):
"""
Returns the entropy of a discrete distribution p
Arguments:
---------
- p: numpy.Ndarray dimension 1 non-negative floats summing to 1
Returns:
--------
- float, value of the entropy
"""
assert (p >= 0).all()
assert abs(np.sum(p)-1) < 1e-6
return -np.sum(p*np.log(p+1e-12))
@numba.njit
def jensen_shannon(hist1, hist2):
'''
Returns the Jensen Shannon divergence between two probabilities
distribution represented as histograms.
Arguments:
---------
- hist1: tuple of numpy.ndarray (density, bins),
len(bins) = len(density) + 1.
The integral of the density wrt bins sums to 1.
- hist2: same format.
Returns:
--------
- float, value of the Jensen Shannon divergence.
'''
bins = np.sort(np.unique(np.array(list(hist1[1]) + list(hist2[1]))))
masses1 = []
masses2 = []
for i, b in enumerate(bins[1::]):
if b <= hist1[1][0]:
masses1.append(0.)
elif b > hist1[1][-1]:
masses1.append(0.)
else:
j = 0
while b > hist1[1][j]:
j += 1
masses1.append((b-bins[i]) * hist1[0][j-1])
if b <= hist2[1][0]:
masses2.append(0.)
elif b > hist2[1][-1]:
masses2.append(0.)
else:
j = 0
while b > hist2[1][j]:
j += 1
masses2.append((b-bins[i]) * hist2[0][j-1])
masses1 = np.array(masses1)
masses2 = np.array(masses2)
masses12 = (masses1+masses2)/2
return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
"""
Returns the fourier transform of evolution for a list of graphs for
the hamiltonian ising and xy.
Arguments:
---------
- graph_list: list or numpy.Ndarray of networkx.Graph objects
Returns:
--------
- fs_xy: numpy.Ndarray of shape (2, len(graph_list), 1000)
[0,i]: Fourier signal of graph i at 1000 points for
hamiltonian XY
[1,i]: frequencies associated to graph i at 1000 points
for hamiltonian XY
- fs_is: same for the Ising hamiltonian
"""
fs_xy = np.zeros((2, len(graph_list), 1000))
fs_is = np.zeros((2, len(graph_list), 1000))
for i, graph in enumerate(graph_list):
fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='xy')
fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='ising')
return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns: | - all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns of zeros for missing energies.
Arguments:
---------
- target_energies: numpy.ndarray of shape (N_dim, ) target energies
- energies: numpy.ndarray of shape (N_dim_init, ) energies of distributions
- masses: numpy.ndarray of shape (N, N_dim_init) discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N, N_dim)
all extended discrete probability distributions
"""
energies = list(energies)
N = masses.shape[0]
res = np.zeros((N, len(target_energies)))
for i, energy in enumerate(target_energies):
if energy not in energies:
res[:, i] = np.zeros((N, ))
else:
res[:, i] = masses[:, energies.index(energy)]
return res
def merge_energies(e1, m1, e2, m2):
"""
Merge the arrays of energy masses, filling with zeros the missing energies in each.
N_dim is the size of the union of the energies from the two distributions.
Arguments:
---------
- e1: numpy.ndarray of shape (N_dim1, ) energies of first distributions
- m1: numpy.ndarray of shape (N1, N_dim1) first discrete probability distributions
- e2: numpy.ndarray of shape (N_dim2, ) energies of first distributions
- m2: numpy.ndarray of shape (N2, N_dim2) first discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N1, N_dim)
all extended first discrete probability distributions
- numpy.ndarray of shape (N2, N_dim)
all extended second discrete probability distributions
"""
e = sorted(list(set(e1) | set(e2)))
return extend_energies(e, e1, m1), extend_energies(e, e2, m2)
def return_js_square_matrix(distributions, verbose=0):
"""
Returns the Jensen-Shannon distance matrix of discrete
distributions.
Arguments:
---------
- distributions: numpy.ndarray of shape (N_sample, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
js_matrix = np.zeros((len(distributions), len(distributions)))
for i in range(len(distributions)):
for j in range(i + 1):
masses1 = distributions[i]
masses2 = distributions[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
js_matrix[j, i] = js
return js_matrix
def return_js_matrix(distributions1, distributions2, verbose=0):
"""
Returns the Jensen-Shannon distance matrix between discrete
distributions.
Arguments:
---------
- distributions1: numpy.ndarray of shape (N_samples_1, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
- distributions2: numpy.ndarray of shape (N_samples_2, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
assert distributions1.shape[1] == distributions2.shape[1], \
"Distributions must have matching dimensions. Consider using merge_energies"
js_matrix = np.zeros((len(distributions1), len(distributions2)))
for i in trange(len(distributions1), desc='dist1 loop', disable=verbose<=0):
for j in trange(len(distributions2), desc='dist2 loop', disable=verbose<=1):
masses1 = distributions1[i]
masses2 = distributions2[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
return js_matrix
class Memoizer:
"""
Will store results of the provided observable on graphs to avoid recomputing.
Storage is based on a key computed using get_key
Attributes:
-----------
- observable: function(networkx.Graph):
return qtip.Qobj diagonal observable
- get_key: function(networkx.Graph):
return a key used to identify the graph
"""
def __init__(self, observable, get_key=None):
self.graphs = {}
self.observable = observable
self.get_key = get_key if get_key is not None else Memoizer.edges_key
@staticmethod
def edges_unique_key(graph):
"""
Key insensitive to how edges of the graph are returned
(order of edges and order of nodes in edges).
Same result for [(a, b), (c, d)] and [(d, c), (a, b)]
"""
edges = list(map(sorted, graph.edges))
return tuple(map(tuple, sorted(edges, key=itemgetter(0,1))))
@staticmethod
def edges_key(graph):
""" Simple key based on the edges list """
return tuple(graph.edges())
def get_observable(self, graph):
"""
Gets observable on graph
Uses memoization to speed up the process if graph has been seen before
Arguments:
---------
- graph: networkx.Graph to get observable on
Returns:
--------
- qtip.Qobj, diagonal observable
"""
key = self.get_key(graph)
if key not in self.graphs:
self.graphs[key] = self.observable(graph)
return self.graphs[key] | -------- | random_line_split |
utils.py |
from quantum_routines import (generate_empty_initial_state,
generate_mixing_Ham, generate_Ham_from_graph)
from qutip import sesolve, sigmaz, sigmap, qeye, tensor, Options
import settings
import numpy as np
import math
import numba
from tqdm.auto import tqdm, trange
from operator import itemgetter
settings.init()
def generate_signal_fourier(G, rot_init=settings.rot_init,
N_sample=1000, hamiltonian='xy',
tf=100*math.pi):
"""
Function to return the Fourier transform of the average number of
excitation signal
Arguments:
---------
- G: networx.Graph, graph to analyze
- rot_init: float, initial rotation
- N_sample: int, number of timesteps to compute the evolution
- hamiltonian: str 'xy' or 'ising', type of hamiltonian to simulate
- tf: float, total time of evolution
Returns:
--------
- plap_fft: numpy.Ndarray, shape (N_sample,) values of the fourier spectra
- freq_normalized: numpy.Ndarray, shape (N_sample,) values of the
fequencies
"""
assert hamiltonian in ['ising', 'xy']
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=hamiltonian)
rotation_angle_single_exc = rot_init/2.
tlist = np.linspace(0, rotation_angle_single_exc, 200)
psi_0 = generate_empty_initial_state(N_nodes)
H_m = generate_mixing_Ham(N_nodes)
result = sesolve(H_m, psi_0, tlist)
final_state = result.states[-1]
sz = sigmaz()
si = qeye(2)
sp = sigmap()
sz_list = []
sp_list = []
for j in range(N_nodes):
op_list = [si for _ in range(N_nodes)]
op_list[j] = sz
sz_list.append(tensor(op_list))
op_list[j] = sp
sp_list.append(tensor(op_list))
tlist = np.linspace(0, tf, N_sample)
observable = (-2*math.sin(2*rotation_angle_single_exc)
* sum(spj for spj in sp_list)
+ math.cos(2*rotation_angle_single_exc)
* sum(szj for szj in sz_list))
opts = Options()
opts.store_states = True
result = sesolve(H_evol, final_state, tlist,
e_ops=[observable], options=opts)
full_signal = result.expect
signal = full_signal[0].real
signal_fft = np.fft.fft(signal)
freq = np.fft.fftfreq(signal.shape[-1])
freq_normalized = np.abs(freq * N_sample * 2) / (tf / np.pi)
return signal_fft, freq_normalized
@numba.njit
def entropy(p):
"""
Returns the entropy of a discrete distribution p
Arguments:
---------
- p: numpy.Ndarray dimension 1 non-negative floats summing to 1
Returns:
--------
- float, value of the entropy
"""
assert (p >= 0).all()
assert abs(np.sum(p)-1) < 1e-6
return -np.sum(p*np.log(p+1e-12))
@numba.njit
def jensen_shannon(hist1, hist2):
'''
Returns the Jensen Shannon divergence between two probabilities
distribution represented as histograms.
Arguments:
---------
- hist1: tuple of numpy.ndarray (density, bins),
len(bins) = len(density) + 1.
The integral of the density wrt bins sums to 1.
- hist2: same format.
Returns:
--------
- float, value of the Jensen Shannon divergence.
'''
bins = np.sort(np.unique(np.array(list(hist1[1]) + list(hist2[1]))))
masses1 = []
masses2 = []
for i, b in enumerate(bins[1::]):
if b <= hist1[1][0]:
masses1.append(0.)
elif b > hist1[1][-1]:
masses1.append(0.)
else:
j = 0
while b > hist1[1][j]:
j += 1
masses1.append((b-bins[i]) * hist1[0][j-1])
if b <= hist2[1][0]:
masses2.append(0.)
elif b > hist2[1][-1]:
masses2.append(0.)
else:
j = 0
while b > hist2[1][j]:
j += 1
masses2.append((b-bins[i]) * hist2[0][j-1])
masses1 = np.array(masses1)
masses2 = np.array(masses2)
masses12 = (masses1+masses2)/2
return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
"""
Returns the fourier transform of evolution for a list of graphs for
the hamiltonian ising and xy.
Arguments:
---------
- graph_list: list or numpy.Ndarray of networkx.Graph objects
Returns:
--------
- fs_xy: numpy.Ndarray of shape (2, len(graph_list), 1000)
[0,i]: Fourier signal of graph i at 1000 points for
hamiltonian XY
[1,i]: frequencies associated to graph i at 1000 points
for hamiltonian XY
- fs_is: same for the Ising hamiltonian
"""
fs_xy = np.zeros((2, len(graph_list), 1000))
fs_is = np.zeros((2, len(graph_list), 1000))
for i, graph in enumerate(graph_list):
fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='xy')
fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='ising')
return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns:
--------
- all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns of zeros for missing energies.
Arguments:
---------
- target_energies: numpy.ndarray of shape (N_dim, ) target energies
- energies: numpy.ndarray of shape (N_dim_init, ) energies of distributions
- masses: numpy.ndarray of shape (N, N_dim_init) discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N, N_dim)
all extended discrete probability distributions
"""
energies = list(energies)
N = masses.shape[0]
res = np.zeros((N, len(target_energies)))
for i, energy in enumerate(target_energies):
if energy not in energies:
res[:, i] = np.zeros((N, ))
else:
res[:, i] = masses[:, energies.index(energy)]
return res
def merge_energies(e1, m1, e2, m2):
"""
Merge the arrays of energy masses, filling with zeros the missing energies in each.
N_dim is the size of the union of the energies from the two distributions.
Arguments:
---------
- e1: numpy.ndarray of shape (N_dim1, ) energies of first distributions
- m1: numpy.ndarray of shape (N1, N_dim1) first discrete probability distributions
- e2: numpy.ndarray of shape (N_dim2, ) energies of first distributions
- m2: numpy.ndarray of shape (N2, N_dim2) first discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N1, N_dim)
all extended first discrete probability distributions
- numpy.ndarray of shape (N2, N_dim)
all extended second discrete probability distributions
"""
e = sorted(list(set(e1) | set(e2)))
return extend_energies(e, e1, m1), extend_energies(e, e2, m2)
def | (distributions, verbose=0):
"""
Returns the Jensen-Shannon distance matrix of discrete
distributions.
Arguments:
---------
- distributions: numpy.ndarray of shape (N_sample, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
js_matrix = np.zeros((len(distributions), len(distributions)))
for i in range(len(distributions)):
for j in range(i + 1):
masses1 = distributions[i]
masses2 = distributions[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
js_matrix[j, i] = js
return js_matrix
def return_js_matrix(distributions1, distributions2, verbose=0):
"""
Returns the Jensen-Shannon distance matrix between discrete
distributions.
Arguments:
---------
- distributions1: numpy.ndarray of shape (N_samples_1, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
- distributions2: numpy.ndarray of shape (N_samples_2, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
assert distributions1.shape[1] == distributions2.shape[1], \
"Distributions must have matching dimensions. Consider using merge_energies"
js_matrix = np.zeros((len(distributions1), len(distributions2)))
for i in trange(len(distributions1), desc='dist1 loop', disable=verbose<=0):
for j in trange(len(distributions2), desc='dist2 loop', disable=verbose<=1):
masses1 = distributions1[i]
masses2 = distributions2[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
return js_matrix
class Memoizer:
"""
Will store results of the provided observable on graphs to avoid recomputing.
Storage is based on a key computed using get_key
Attributes:
-----------
- observable: function(networkx.Graph):
return qtip.Qobj diagonal observable
- get_key: function(networkx.Graph):
return a key used to identify the graph
"""
def __init__(self, observable, get_key=None):
self.graphs = {}
self.observable = observable
self.get_key = get_key if get_key is not None else Memoizer.edges_key
@staticmethod
def edges_unique_key(graph):
"""
Key insensitive to how edges of the graph are returned
(order of edges and order of nodes in edges).
Same result for [(a, b), (c, d)] and [(d, c), (a, b)]
"""
edges = list(map(sorted, graph.edges))
return tuple(map(tuple, sorted(edges, key=itemgetter(0,1))))
@staticmethod
def edges_key(graph):
""" Simple key based on the edges list """
return tuple(graph.edges())
def get_observable(self, graph):
"""
Gets observable on graph
Uses memoization to speed up the process if graph has been seen before
Arguments:
---------
- graph: networkx.Graph to get observable on
Returns:
--------
- qtip.Qobj, diagonal observable
"""
key = self.get_key(graph)
if key not in self.graphs:
self.graphs[key] = self.observable(graph)
return self.graphs[key]
| return_js_square_matrix | identifier_name |
instance.go | package updatectl
import (
"bytes"
"code.google.com/p/go-uuid/uuid"
"encoding/xml"
"fmt"
"github.com/coreos/go-omaha/omaha"
update "github.com/coreos/updatectl/client/update/v1"
"github.com/deis/deisctl/utils"
"io"
"log"
"math/rand"
"net/http"
"os"
"text/tabwriter"
"time"
)
const (
initialInterval = time.Second * 10
maxInterval = time.Minute * 7
downloadDir = "/home/core/deis/systemd/"
)
var (
instanceFlags struct {
groupId StringFlag
appId StringFlag
start int64
end int64
verbose bool
clientsPerApp int
minSleep int
maxSleep int
errorRate int
OEM string
pingOnly int
version string
}
cmdInstance = &Command{
Name: "instance",
Usage: "[OPTION]...",
Summary: "Operations to view instances.",
Subcommands: []*Command{
cmdInstanceListUpdates,
cmdInstanceListAppVersions,
cmdInstanceDeis,
},
}
cmdInstanceListUpdates = &Command{
Name: "instance list-updates",
Usage: "[OPTION]...",
Description: "Generates a list of instance updates.",
Run: instanceListUpdates,
}
cmdInstanceListAppVersions = &Command{
Name: "instance list-app-versions",
Usage: "[OPTION]...",
Description: "Generates a list of apps/versions with instance count.",
Run: instanceListAppVersions,
}
cmdInstanceDeis = &Command{
Name: "instance deis",
Usage: "[OPTION]...",
Description: "Simulate single deis to update instances.",
Run: instanceDeis,
}
)
func init() {
cmdInstanceListUpdates.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListUpdates.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string
SessionId string
Version string
AppId string
Track string
config *serverConfig
errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) OmahaRequest(otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" |
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false, false)
// installed
fmt.Println("updated done")
// simulate reboot lock for a while
for c.pingsRemaining > 0 {
c.MakeRequest("", "", false, true)
c.pingsRemaining--
time.Sleep(1 * time.Second)
}
c.Log("updated from %s to %s\n", c.Version, uc.Manifest.Version)
c.Version = uc.Manifest.Version
_, err := c.MakeRequest("3", "2", false, false) // Send complete with new version.
if err != nil {
log.Println(err)
}
c.SessionId = uuid.New()
}
// Sleep between n and m seconds
func (c *Client) Loop(n, m int) {
interval := initialInterval
for {
randSleep(n, m)
resp, err := c.MakeRequest("3", "2", true, false)
if err != nil {
log.Println(err)
continue
}
uc := resp.Apps[0].UpdateCheck
if uc.Status != "ok" {
c.Log("update check status: %s\n", uc.Status)
} else {
c.SetVersion(resp)
}
}
}
// Sleeps randomly between n and m seconds.
func randSleep(n, m int) {
r := m
if m-n > 0 {
r = rand.Intn(m-n) + n
}
time.Sleep(time.Duration(r) * time.Second)
}
func instanceDeis(args []string, service *update.Service, out *tabwriter.Writer) int {
if instanceFlags.appId.Get() == nil || instanceFlags.groupId.Get() == nil {
return ERROR_USAGE
}
conf := &serverConfig{
server: globalFlags.Server,
}
c := &Client{
Id: fmt.Sprintf("{update-client-"+utils.NewUuid(), i),
SessionId: uuid.New(),
Version: instanceFlags.version,
AppId: instanceFlags.appId.String(),
Track: instanceFlags.groupId.String(),
config: conf,
errorRate: instanceFlags.errorRate,
pingsRemaining: instanceFlags.pingOnly,
}
go c.Loop(instanceFlags.minSleep, instanceFlags.maxSleep)
// run forever
wait := make(chan bool)
<-wait
return OK
}
| {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
} | conditional_block |
instance.go | package updatectl
import (
"bytes"
"code.google.com/p/go-uuid/uuid"
"encoding/xml"
"fmt"
"github.com/coreos/go-omaha/omaha"
update "github.com/coreos/updatectl/client/update/v1"
"github.com/deis/deisctl/utils"
"io"
"log"
"math/rand"
"net/http"
"os"
"text/tabwriter"
"time"
)
const (
initialInterval = time.Second * 10
maxInterval = time.Minute * 7
downloadDir = "/home/core/deis/systemd/"
)
var (
instanceFlags struct {
groupId StringFlag
appId StringFlag
start int64
end int64
verbose bool
clientsPerApp int
minSleep int
maxSleep int
errorRate int
OEM string
pingOnly int
version string
}
cmdInstance = &Command{
Name: "instance",
Usage: "[OPTION]...",
Summary: "Operations to view instances.",
Subcommands: []*Command{
cmdInstanceListUpdates,
cmdInstanceListAppVersions,
cmdInstanceDeis,
},
}
cmdInstanceListUpdates = &Command{
Name: "instance list-updates",
Usage: "[OPTION]...",
Description: "Generates a list of instance updates.",
Run: instanceListUpdates,
}
cmdInstanceListAppVersions = &Command{
Name: "instance list-app-versions",
Usage: "[OPTION]...",
Description: "Generates a list of apps/versions with instance count.",
Run: instanceListAppVersions,
}
cmdInstanceDeis = &Command{
Name: "instance deis",
Usage: "[OPTION]...",
Description: "Simulate single deis to update instances.",
Run: instanceDeis,
}
)
func init() {
cmdInstanceListUpdates.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListUpdates.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string
SessionId string
Version string
AppId string
Track string
config *serverConfig
errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) OmahaRequest(otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
}
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false, false)
// installed
fmt.Println("updated done")
// simulate reboot lock for a while
for c.pingsRemaining > 0 {
c.MakeRequest("", "", false, true)
c.pingsRemaining--
time.Sleep(1 * time.Second)
}
c.Log("updated from %s to %s\n", c.Version, uc.Manifest.Version)
c.Version = uc.Manifest.Version
_, err := c.MakeRequest("3", "2", false, false) // Send complete with new version.
if err != nil {
log.Println(err)
}
c.SessionId = uuid.New()
}
// Sleep between n and m seconds
func (c *Client) Loop(n, m int) {
interval := initialInterval
for {
randSleep(n, m)
resp, err := c.MakeRequest("3", "2", true, false)
if err != nil {
log.Println(err)
continue
}
uc := resp.Apps[0].UpdateCheck
if uc.Status != "ok" {
c.Log("update check status: %s\n", uc.Status)
} else {
c.SetVersion(resp)
}
}
}
// Sleeps randomly between n and m seconds.
func randSleep(n, m int) |
func instanceDeis(args []string, service *update.Service, out *tabwriter.Writer) int {
if instanceFlags.appId.Get() == nil || instanceFlags.groupId.Get() == nil {
return ERROR_USAGE
}
conf := &serverConfig{
server: globalFlags.Server,
}
c := &Client{
Id: fmt.Sprintf("{update-client-"+utils.NewUuid(), i),
SessionId: uuid.New(),
Version: instanceFlags.version,
AppId: instanceFlags.appId.String(),
Track: instanceFlags.groupId.String(),
config: conf,
errorRate: instanceFlags.errorRate,
pingsRemaining: instanceFlags.pingOnly,
}
go c.Loop(instanceFlags.minSleep, instanceFlags.maxSleep)
// run forever
wait := make(chan bool)
<-wait
return OK
}
| {
r := m
if m-n > 0 {
r = rand.Intn(m-n) + n
}
time.Sleep(time.Duration(r) * time.Second)
} | identifier_body |
instance.go | package updatectl
import (
"bytes"
"code.google.com/p/go-uuid/uuid"
"encoding/xml"
"fmt"
"github.com/coreos/go-omaha/omaha"
update "github.com/coreos/updatectl/client/update/v1"
"github.com/deis/deisctl/utils"
"io"
"log"
"math/rand"
"net/http"
"os"
"text/tabwriter"
"time"
)
const (
initialInterval = time.Second * 10
maxInterval = time.Minute * 7
downloadDir = "/home/core/deis/systemd/"
)
var (
instanceFlags struct {
groupId StringFlag
appId StringFlag
start int64
end int64
verbose bool
clientsPerApp int
minSleep int
maxSleep int
errorRate int
OEM string
pingOnly int
version string
}
cmdInstance = &Command{
Name: "instance",
Usage: "[OPTION]...",
Summary: "Operations to view instances.",
Subcommands: []*Command{
cmdInstanceListUpdates,
cmdInstanceListAppVersions,
cmdInstanceDeis,
},
}
cmdInstanceListUpdates = &Command{
Name: "instance list-updates",
Usage: "[OPTION]...",
Description: "Generates a list of instance updates.",
Run: instanceListUpdates,
}
cmdInstanceListAppVersions = &Command{
Name: "instance list-app-versions",
Usage: "[OPTION]...",
Description: "Generates a list of apps/versions with instance count.",
Run: instanceListAppVersions,
}
cmdInstanceDeis = &Command{
Name: "instance deis",
Usage: "[OPTION]...",
Description: "Simulate single deis to update instances.",
Run: instanceDeis,
}
)
func init() {
cmdInstanceListUpdates.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListUpdates.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string
SessionId string
Version string
AppId string
Track string
config *serverConfig
errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) | (otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
}
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false, false)
// installed
fmt.Println("updated done")
// simulate reboot lock for a while
for c.pingsRemaining > 0 {
c.MakeRequest("", "", false, true)
c.pingsRemaining--
time.Sleep(1 * time.Second)
}
c.Log("updated from %s to %s\n", c.Version, uc.Manifest.Version)
c.Version = uc.Manifest.Version
_, err := c.MakeRequest("3", "2", false, false) // Send complete with new version.
if err != nil {
log.Println(err)
}
c.SessionId = uuid.New()
}
// Sleep between n and m seconds
func (c *Client) Loop(n, m int) {
interval := initialInterval
for {
randSleep(n, m)
resp, err := c.MakeRequest("3", "2", true, false)
if err != nil {
log.Println(err)
continue
}
uc := resp.Apps[0].UpdateCheck
if uc.Status != "ok" {
c.Log("update check status: %s\n", uc.Status)
} else {
c.SetVersion(resp)
}
}
}
// Sleeps randomly between n and m seconds.
func randSleep(n, m int) {
r := m
if m-n > 0 {
r = rand.Intn(m-n) + n
}
time.Sleep(time.Duration(r) * time.Second)
}
func instanceDeis(args []string, service *update.Service, out *tabwriter.Writer) int {
if instanceFlags.appId.Get() == nil || instanceFlags.groupId.Get() == nil {
return ERROR_USAGE
}
conf := &serverConfig{
server: globalFlags.Server,
}
c := &Client{
Id: fmt.Sprintf("{update-client-"+utils.NewUuid(), i),
SessionId: uuid.New(),
Version: instanceFlags.version,
AppId: instanceFlags.appId.String(),
Track: instanceFlags.groupId.String(),
config: conf,
errorRate: instanceFlags.errorRate,
pingsRemaining: instanceFlags.pingOnly,
}
go c.Loop(instanceFlags.minSleep, instanceFlags.maxSleep)
// run forever
wait := make(chan bool)
<-wait
return OK
}
| OmahaRequest | identifier_name |
instance.go | package updatectl
import (
"bytes"
"code.google.com/p/go-uuid/uuid"
"encoding/xml"
"fmt"
"github.com/coreos/go-omaha/omaha"
update "github.com/coreos/updatectl/client/update/v1"
"github.com/deis/deisctl/utils"
"io"
"log"
"math/rand"
"net/http"
"os"
"text/tabwriter"
"time"
)
const (
initialInterval = time.Second * 10
maxInterval = time.Minute * 7
downloadDir = "/home/core/deis/systemd/"
)
var (
instanceFlags struct {
groupId StringFlag
appId StringFlag
start int64
end int64
verbose bool
clientsPerApp int
minSleep int
maxSleep int
errorRate int
OEM string
pingOnly int
version string
}
cmdInstance = &Command{
Name: "instance",
Usage: "[OPTION]...",
Summary: "Operations to view instances.",
Subcommands: []*Command{
cmdInstanceListUpdates,
cmdInstanceListAppVersions,
cmdInstanceDeis,
},
}
cmdInstanceListUpdates = &Command{
Name: "instance list-updates",
Usage: "[OPTION]...",
Description: "Generates a list of instance updates.",
Run: instanceListUpdates,
}
cmdInstanceListAppVersions = &Command{
Name: "instance list-app-versions",
Usage: "[OPTION]...",
Description: "Generates a list of apps/versions with instance count.",
Run: instanceListAppVersions,
}
cmdInstanceDeis = &Command{
Name: "instance deis",
Usage: "[OPTION]...",
Description: "Simulate single deis to update instances.",
Run: instanceDeis,
}
)
func init() {
cmdInstanceListUpdates.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListUpdates.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string | errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) OmahaRequest(otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
}
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false, false)
// installed
fmt.Println("updated done")
// simulate reboot lock for a while
for c.pingsRemaining > 0 {
c.MakeRequest("", "", false, true)
c.pingsRemaining--
time.Sleep(1 * time.Second)
}
c.Log("updated from %s to %s\n", c.Version, uc.Manifest.Version)
c.Version = uc.Manifest.Version
_, err := c.MakeRequest("3", "2", false, false) // Send complete with new version.
if err != nil {
log.Println(err)
}
c.SessionId = uuid.New()
}
// Sleep between n and m seconds
func (c *Client) Loop(n, m int) {
interval := initialInterval
for {
randSleep(n, m)
resp, err := c.MakeRequest("3", "2", true, false)
if err != nil {
log.Println(err)
continue
}
uc := resp.Apps[0].UpdateCheck
if uc.Status != "ok" {
c.Log("update check status: %s\n", uc.Status)
} else {
c.SetVersion(resp)
}
}
}
// Sleeps randomly between n and m seconds.
func randSleep(n, m int) {
r := m
if m-n > 0 {
r = rand.Intn(m-n) + n
}
time.Sleep(time.Duration(r) * time.Second)
}
func instanceDeis(args []string, service *update.Service, out *tabwriter.Writer) int {
if instanceFlags.appId.Get() == nil || instanceFlags.groupId.Get() == nil {
return ERROR_USAGE
}
conf := &serverConfig{
server: globalFlags.Server,
}
c := &Client{
Id: fmt.Sprintf("{update-client-"+utils.NewUuid(), i),
SessionId: uuid.New(),
Version: instanceFlags.version,
AppId: instanceFlags.appId.String(),
Track: instanceFlags.groupId.String(),
config: conf,
errorRate: instanceFlags.errorRate,
pingsRemaining: instanceFlags.pingOnly,
}
go c.Loop(instanceFlags.minSleep, instanceFlags.maxSleep)
// run forever
wait := make(chan bool)
<-wait
return OK
} | SessionId string
Version string
AppId string
Track string
config *serverConfig | random_line_split |
peer.rs | use crate::messages::{Message, MessageHeader, Ping, Version, NODE_BITCOIN_CASH, NODE_NETWORK};
use crate::network::Network;
use crate::peer::atomic_reader::AtomicReader;
use crate::util::rx::{Observable, Observer, Single, Subject};
use crate::util::{secs_since, Error, Result};
use snowflake::ProcessUniqueId;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::io;
use std::io::Write;
use std::net::{IpAddr, Shutdown, SocketAddr, TcpStream};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, Weak};
use std::thread;
use std::time::{Duration, UNIX_EPOCH};
/// Time to wait for the initial TCP connection
const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
/// Time to wait for handshake messages before failing to connect
const HANDSHAKE_READ_TIMEOUT: Duration = Duration::from_secs(3);
/// Event emitted when a connection is established with the peer
#[derive(Clone, Debug)]
pub struct PeerConnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the connection with the peer is terminated
#[derive(Clone, Debug)]
pub struct PeerDisconnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the peer receives a network message
#[derive(Clone, Debug)]
pub struct PeerMessage {
pub peer: Arc<Peer>,
pub message: Message,
}
/// Filters peers based on their version information before connecting
pub trait PeerFilter: Send + Sync {
fn connectable(&self, _: &Version) -> bool;
}
/// Filters out all peers except for Bitcoin SV full nodes
#[derive(Clone, Default, Debug)]
pub struct SVPeerFilter {
pub min_start_height: i32,
}
impl SVPeerFilter {
/// Creates a new SV filter that requires a minimum starting chain height
pub fn new(min_start_height: i32) -> Arc<SVPeerFilter> {
Arc::new(SVPeerFilter { min_start_height })
}
}
impl PeerFilter for SVPeerFilter {
fn connectable(&self, version: &Version) -> bool {
version.user_agent.contains("Bitcoin SV")
&& version.start_height >= self.min_start_height
&& version.services & (NODE_BITCOIN_CASH | NODE_NETWORK) != 0
}
}
/// Node on the network to send and receive messages
///
/// It will setup a connection, respond to pings, and store basic properties about the connection,
/// but any real logic to process messages will be handled outside. Network messages received will
/// be published to an observable on the peer's receiver thread. Messages may be sent via send()
/// from any thread. Once shutdown, the Peer may no longer be used.
pub struct Peer {
/// Unique id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn version(&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header),
None => Message::read(&mut tcp_reader, magic),
};
// Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> |
fn handle_message(&self, message: &Message) -> Result<()> {
// A subset of messages are handled directly by the peer
match message {
Message::FeeFilter(feefilter) => {
*self.minfee.lock().unwrap() = feefilter.minfee;
}
Message::Ping(ping) => {
let pong = Message::Pong(ping.clone());
self.send(&pong)?;
}
Message::SendHeaders => {
self.sendheaders.store(true, Ordering::Relaxed);
}
Message::SendCmpct(sendcmpct) => {
let enable = sendcmpct.use_cmpctblock();
self.sendcmpct.store(enable, Ordering::Relaxed);
}
_ => {}
}
Ok(())
}
fn strong_self(&self) -> Option<Arc<Peer>> {
match &*self.weak_self.lock().unwrap() {
Some(ref weak_peer) => weak_peer.upgrade(),
None => None,
}
}
}
impl PartialEq for Peer {
fn eq(&self, other: &Peer) -> bool {
self.id == other.id
}
}
impl Eq for Peer {}
impl Hash for Peer {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state)
}
}
impl fmt::Debug for Peer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&format!("[Peer {}]", self.id))
}
}
impl Drop for Peer {
fn drop(&mut self) {
self.disconnect();
}
}
| {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
tcp_stream.set_nonblocking(false)?;
// Write our version
let our_version = Message::Version(version);
debug!("{:?} Write {:#?}", self, our_version);
let magic = self.network.magic();
our_version.write(&mut tcp_stream, magic)?;
// Read their version
let msg = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, msg);
let their_version = match msg {
Message::Version(version) => version,
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
if !filter.connectable(&their_version) {
return Err(Error::IllegalState("Peer filtered out".to_string()));
}
let now = secs_since(UNIX_EPOCH) as i64;
*self.time_delta.lock().unwrap() = now - their_version.timestamp;
*self.version.lock().unwrap() = Some(their_version);
// Read their verack
let their_verack = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, their_verack);
match their_verack {
Message::Verack => {}
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
// Write our verack
debug!("{:?} Write {:#?}", self, Message::Verack);
Message::Verack.write(&mut tcp_stream, magic)?;
// Write a ping message because this seems to help with connection weirdness
// https://bitcoin.stackexchange.com/questions/49487/getaddr-not-returning-connected-node-addresses
let ping = Message::Ping(Ping {
nonce: secs_since(UNIX_EPOCH) as u64,
});
debug!("{:?} Write {:#?}", self, ping);
ping.write(&mut tcp_stream, magic)?;
// After handshake, clone TCP stream and save the write version
*self.tcp_writer.lock().unwrap() = Some(tcp_stream.try_clone()?);
// We don't need a timeout for the read. The peer will shutdown just fine.
// The read timeout doesn't work reliably across platforms anyway.
tcp_stream.set_read_timeout(None)?;
Ok(tcp_stream)
} | identifier_body |
peer.rs | use crate::messages::{Message, MessageHeader, Ping, Version, NODE_BITCOIN_CASH, NODE_NETWORK};
use crate::network::Network;
use crate::peer::atomic_reader::AtomicReader;
use crate::util::rx::{Observable, Observer, Single, Subject};
use crate::util::{secs_since, Error, Result};
use snowflake::ProcessUniqueId;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::io;
use std::io::Write;
use std::net::{IpAddr, Shutdown, SocketAddr, TcpStream};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, Weak};
use std::thread;
use std::time::{Duration, UNIX_EPOCH};
/// Time to wait for the initial TCP connection
const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
/// Time to wait for handshake messages before failing to connect
const HANDSHAKE_READ_TIMEOUT: Duration = Duration::from_secs(3);
/// Event emitted when a connection is established with the peer
#[derive(Clone, Debug)]
pub struct PeerConnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the connection with the peer is terminated
#[derive(Clone, Debug)]
pub struct PeerDisconnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the peer receives a network message
#[derive(Clone, Debug)]
pub struct PeerMessage {
pub peer: Arc<Peer>,
pub message: Message,
}
/// Filters peers based on their version information before connecting
pub trait PeerFilter: Send + Sync {
fn connectable(&self, _: &Version) -> bool;
}
/// Filters out all peers except for Bitcoin SV full nodes
#[derive(Clone, Default, Debug)]
pub struct SVPeerFilter {
pub min_start_height: i32,
}
impl SVPeerFilter {
/// Creates a new SV filter that requires a minimum starting chain height
pub fn new(min_start_height: i32) -> Arc<SVPeerFilter> {
Arc::new(SVPeerFilter { min_start_height })
}
}
impl PeerFilter for SVPeerFilter {
fn connectable(&self, version: &Version) -> bool {
version.user_agent.contains("Bitcoin SV")
&& version.start_height >= self.min_start_height
&& version.services & (NODE_BITCOIN_CASH | NODE_NETWORK) != 0
}
}
/// Node on the network to send and receive messages
///
/// It will setup a connection, respond to pings, and store basic properties about the connection,
/// but any real logic to process messages will be handled outside. Network messages received will
/// be published to an observable on the peer's receiver thread. Messages may be sent via send()
/// from any thread. Once shutdown, the Peer may no longer be used.
pub struct Peer {
/// Unique id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn version(&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header), | // Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
tcp_stream.set_nonblocking(false)?;
// Write our version
let our_version = Message::Version(version);
debug!("{:?} Write {:#?}", self, our_version);
let magic = self.network.magic();
our_version.write(&mut tcp_stream, magic)?;
// Read their version
let msg = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, msg);
let their_version = match msg {
Message::Version(version) => version,
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
if !filter.connectable(&their_version) {
return Err(Error::IllegalState("Peer filtered out".to_string()));
}
let now = secs_since(UNIX_EPOCH) as i64;
*self.time_delta.lock().unwrap() = now - their_version.timestamp;
*self.version.lock().unwrap() = Some(their_version);
// Read their verack
let their_verack = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, their_verack);
match their_verack {
Message::Verack => {}
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
// Write our verack
debug!("{:?} Write {:#?}", self, Message::Verack);
Message::Verack.write(&mut tcp_stream, magic)?;
// Write a ping message because this seems to help with connection weirdness
// https://bitcoin.stackexchange.com/questions/49487/getaddr-not-returning-connected-node-addresses
let ping = Message::Ping(Ping {
nonce: secs_since(UNIX_EPOCH) as u64,
});
debug!("{:?} Write {:#?}", self, ping);
ping.write(&mut tcp_stream, magic)?;
// After handshake, clone TCP stream and save the write version
*self.tcp_writer.lock().unwrap() = Some(tcp_stream.try_clone()?);
// We don't need a timeout for the read. The peer will shutdown just fine.
// The read timeout doesn't work reliably across platforms anyway.
tcp_stream.set_read_timeout(None)?;
Ok(tcp_stream)
}
fn handle_message(&self, message: &Message) -> Result<()> {
// A subset of messages are handled directly by the peer
match message {
Message::FeeFilter(feefilter) => {
*self.minfee.lock().unwrap() = feefilter.minfee;
}
Message::Ping(ping) => {
let pong = Message::Pong(ping.clone());
self.send(&pong)?;
}
Message::SendHeaders => {
self.sendheaders.store(true, Ordering::Relaxed);
}
Message::SendCmpct(sendcmpct) => {
let enable = sendcmpct.use_cmpctblock();
self.sendcmpct.store(enable, Ordering::Relaxed);
}
_ => {}
}
Ok(())
}
fn strong_self(&self) -> Option<Arc<Peer>> {
match &*self.weak_self.lock().unwrap() {
Some(ref weak_peer) => weak_peer.upgrade(),
None => None,
}
}
}
impl PartialEq for Peer {
fn eq(&self, other: &Peer) -> bool {
self.id == other.id
}
}
impl Eq for Peer {}
impl Hash for Peer {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state)
}
}
impl fmt::Debug for Peer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&format!("[Peer {}]", self.id))
}
}
impl Drop for Peer {
fn drop(&mut self) {
self.disconnect();
}
} | None => Message::read(&mut tcp_reader, magic),
};
| random_line_split |
peer.rs | use crate::messages::{Message, MessageHeader, Ping, Version, NODE_BITCOIN_CASH, NODE_NETWORK};
use crate::network::Network;
use crate::peer::atomic_reader::AtomicReader;
use crate::util::rx::{Observable, Observer, Single, Subject};
use crate::util::{secs_since, Error, Result};
use snowflake::ProcessUniqueId;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::io;
use std::io::Write;
use std::net::{IpAddr, Shutdown, SocketAddr, TcpStream};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, Weak};
use std::thread;
use std::time::{Duration, UNIX_EPOCH};
/// Time to wait for the initial TCP connection
const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
/// Time to wait for handshake messages before failing to connect
const HANDSHAKE_READ_TIMEOUT: Duration = Duration::from_secs(3);
/// Event emitted when a connection is established with the peer
#[derive(Clone, Debug)]
pub struct PeerConnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the connection with the peer is terminated
#[derive(Clone, Debug)]
pub struct PeerDisconnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the peer receives a network message
#[derive(Clone, Debug)]
pub struct PeerMessage {
pub peer: Arc<Peer>,
pub message: Message,
}
/// Filters peers based on their version information before connecting
pub trait PeerFilter: Send + Sync {
fn connectable(&self, _: &Version) -> bool;
}
/// Filters out all peers except for Bitcoin SV full nodes
#[derive(Clone, Default, Debug)]
pub struct SVPeerFilter {
pub min_start_height: i32,
}
impl SVPeerFilter {
/// Creates a new SV filter that requires a minimum starting chain height
pub fn new(min_start_height: i32) -> Arc<SVPeerFilter> {
Arc::new(SVPeerFilter { min_start_height })
}
}
impl PeerFilter for SVPeerFilter {
fn connectable(&self, version: &Version) -> bool {
version.user_agent.contains("Bitcoin SV")
&& version.start_height >= self.min_start_height
&& version.services & (NODE_BITCOIN_CASH | NODE_NETWORK) != 0
}
}
/// Node on the network to send and receive messages
///
/// It will setup a connection, respond to pings, and store basic properties about the connection,
/// but any real logic to process messages will be handled outside. Network messages received will
/// be published to an observable on the peer's receiver thread. Messages may be sent via send()
/// from any thread. Once shutdown, the Peer may no longer be used.
pub struct Peer {
/// Unique id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn | (&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header),
None => Message::read(&mut tcp_reader, magic),
};
// Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
tcp_stream.set_nonblocking(false)?;
// Write our version
let our_version = Message::Version(version);
debug!("{:?} Write {:#?}", self, our_version);
let magic = self.network.magic();
our_version.write(&mut tcp_stream, magic)?;
// Read their version
let msg = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, msg);
let their_version = match msg {
Message::Version(version) => version,
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
if !filter.connectable(&their_version) {
return Err(Error::IllegalState("Peer filtered out".to_string()));
}
let now = secs_since(UNIX_EPOCH) as i64;
*self.time_delta.lock().unwrap() = now - their_version.timestamp;
*self.version.lock().unwrap() = Some(their_version);
// Read their verack
let their_verack = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, their_verack);
match their_verack {
Message::Verack => {}
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
// Write our verack
debug!("{:?} Write {:#?}", self, Message::Verack);
Message::Verack.write(&mut tcp_stream, magic)?;
// Write a ping message because this seems to help with connection weirdness
// https://bitcoin.stackexchange.com/questions/49487/getaddr-not-returning-connected-node-addresses
let ping = Message::Ping(Ping {
nonce: secs_since(UNIX_EPOCH) as u64,
});
debug!("{:?} Write {:#?}", self, ping);
ping.write(&mut tcp_stream, magic)?;
// After handshake, clone TCP stream and save the write version
*self.tcp_writer.lock().unwrap() = Some(tcp_stream.try_clone()?);
// We don't need a timeout for the read. The peer will shutdown just fine.
// The read timeout doesn't work reliably across platforms anyway.
tcp_stream.set_read_timeout(None)?;
Ok(tcp_stream)
}
fn handle_message(&self, message: &Message) -> Result<()> {
// A subset of messages are handled directly by the peer
match message {
Message::FeeFilter(feefilter) => {
*self.minfee.lock().unwrap() = feefilter.minfee;
}
Message::Ping(ping) => {
let pong = Message::Pong(ping.clone());
self.send(&pong)?;
}
Message::SendHeaders => {
self.sendheaders.store(true, Ordering::Relaxed);
}
Message::SendCmpct(sendcmpct) => {
let enable = sendcmpct.use_cmpctblock();
self.sendcmpct.store(enable, Ordering::Relaxed);
}
_ => {}
}
Ok(())
}
fn strong_self(&self) -> Option<Arc<Peer>> {
match &*self.weak_self.lock().unwrap() {
Some(ref weak_peer) => weak_peer.upgrade(),
None => None,
}
}
}
impl PartialEq for Peer {
fn eq(&self, other: &Peer) -> bool {
self.id == other.id
}
}
impl Eq for Peer {}
impl Hash for Peer {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state)
}
}
impl fmt::Debug for Peer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&format!("[Peer {}]", self.id))
}
}
impl Drop for Peer {
fn drop(&mut self) {
self.disconnect();
}
}
| version | identifier_name |
peer.rs | use crate::messages::{Message, MessageHeader, Ping, Version, NODE_BITCOIN_CASH, NODE_NETWORK};
use crate::network::Network;
use crate::peer::atomic_reader::AtomicReader;
use crate::util::rx::{Observable, Observer, Single, Subject};
use crate::util::{secs_since, Error, Result};
use snowflake::ProcessUniqueId;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::io;
use std::io::Write;
use std::net::{IpAddr, Shutdown, SocketAddr, TcpStream};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, Weak};
use std::thread;
use std::time::{Duration, UNIX_EPOCH};
/// Time to wait for the initial TCP connection
const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
/// Time to wait for handshake messages before failing to connect
const HANDSHAKE_READ_TIMEOUT: Duration = Duration::from_secs(3);
/// Event emitted when a connection is established with the peer
#[derive(Clone, Debug)]
pub struct PeerConnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the connection with the peer is terminated
#[derive(Clone, Debug)]
pub struct PeerDisconnected {
pub peer: Arc<Peer>,
}
/// Event emitted when the peer receives a network message
#[derive(Clone, Debug)]
pub struct PeerMessage {
pub peer: Arc<Peer>,
pub message: Message,
}
/// Filters peers based on their version information before connecting
pub trait PeerFilter: Send + Sync {
fn connectable(&self, _: &Version) -> bool;
}
/// Filters out all peers except for Bitcoin SV full nodes
#[derive(Clone, Default, Debug)]
pub struct SVPeerFilter {
pub min_start_height: i32,
}
impl SVPeerFilter {
/// Creates a new SV filter that requires a minimum starting chain height
pub fn new(min_start_height: i32) -> Arc<SVPeerFilter> {
Arc::new(SVPeerFilter { min_start_height })
}
}
impl PeerFilter for SVPeerFilter {
fn connectable(&self, version: &Version) -> bool {
version.user_agent.contains("Bitcoin SV")
&& version.start_height >= self.min_start_height
&& version.services & (NODE_BITCOIN_CASH | NODE_NETWORK) != 0
}
}
/// Node on the network to send and receive messages
///
/// It will setup a connection, respond to pings, and store basic properties about the connection,
/// but any real logic to process messages will be handled outside. Network messages received will
/// be published to an observable on the peer's receiver thread. Messages may be sent via send()
/// from any thread. Once shutdown, the Peer may no longer be used.
pub struct Peer {
/// Unique id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn version(&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header),
None => Message::read(&mut tcp_reader, magic),
};
// Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
tcp_stream.set_nonblocking(false)?;
// Write our version
let our_version = Message::Version(version);
debug!("{:?} Write {:#?}", self, our_version);
let magic = self.network.magic();
our_version.write(&mut tcp_stream, magic)?;
// Read their version
let msg = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, msg);
let their_version = match msg {
Message::Version(version) => version,
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
if !filter.connectable(&their_version) {
return Err(Error::IllegalState("Peer filtered out".to_string()));
}
let now = secs_since(UNIX_EPOCH) as i64;
*self.time_delta.lock().unwrap() = now - their_version.timestamp;
*self.version.lock().unwrap() = Some(their_version);
// Read their verack
let their_verack = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, their_verack);
match their_verack {
Message::Verack => |
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
// Write our verack
debug!("{:?} Write {:#?}", self, Message::Verack);
Message::Verack.write(&mut tcp_stream, magic)?;
// Write a ping message because this seems to help with connection weirdness
// https://bitcoin.stackexchange.com/questions/49487/getaddr-not-returning-connected-node-addresses
let ping = Message::Ping(Ping {
nonce: secs_since(UNIX_EPOCH) as u64,
});
debug!("{:?} Write {:#?}", self, ping);
ping.write(&mut tcp_stream, magic)?;
// After handshake, clone TCP stream and save the write version
*self.tcp_writer.lock().unwrap() = Some(tcp_stream.try_clone()?);
// We don't need a timeout for the read. The peer will shutdown just fine.
// The read timeout doesn't work reliably across platforms anyway.
tcp_stream.set_read_timeout(None)?;
Ok(tcp_stream)
}
fn handle_message(&self, message: &Message) -> Result<()> {
// A subset of messages are handled directly by the peer
match message {
Message::FeeFilter(feefilter) => {
*self.minfee.lock().unwrap() = feefilter.minfee;
}
Message::Ping(ping) => {
let pong = Message::Pong(ping.clone());
self.send(&pong)?;
}
Message::SendHeaders => {
self.sendheaders.store(true, Ordering::Relaxed);
}
Message::SendCmpct(sendcmpct) => {
let enable = sendcmpct.use_cmpctblock();
self.sendcmpct.store(enable, Ordering::Relaxed);
}
_ => {}
}
Ok(())
}
fn strong_self(&self) -> Option<Arc<Peer>> {
match &*self.weak_self.lock().unwrap() {
Some(ref weak_peer) => weak_peer.upgrade(),
None => None,
}
}
}
impl PartialEq for Peer {
fn eq(&self, other: &Peer) -> bool {
self.id == other.id
}
}
impl Eq for Peer {}
impl Hash for Peer {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state)
}
}
impl fmt::Debug for Peer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&format!("[Peer {}]", self.id))
}
}
impl Drop for Peer {
fn drop(&mut self) {
self.disconnect();
}
}
| {} | conditional_block |
mod.rs | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! API entry point.
//!
//! The first thing to do after loading the Vulkan library is to create an `Instance` object.
//!
//! For example:
//!
//! ```no_run
//! use vulkano::{
//! instance::{Instance, InstanceExtensions},
//! Version, VulkanLibrary,
//! };
//!
//! let library = VulkanLibrary::new()
//! .unwrap_or_else(|err| panic!("Couldn't load Vulkan library: {:?}", err));
//! let instance = Instance::new(library, Default::default())
//! .unwrap_or_else(|err| panic!("Couldn't create instance: {:?}", err));
//! ```
//!
//! Creating an instance initializes everything and allows you to enumerate physical devices,
//! ie. all the Vulkan implementations that are available on the system.
//!
//! ```no_run
//! # use vulkano::{
//! # instance::{Instance, InstanceExtensions},
//! # Version, VulkanLibrary,
//! # };
//! use vulkano::device::physical::PhysicalDevice;
//!
//! # let library = VulkanLibrary::new().unwrap();
//! # let instance = Instance::new(library, Default::default()).unwrap();
//! for physical_device in instance.enumerate_physical_devices().unwrap() {
//! println!("Available device: {}", physical_device.properties().device_name);
//! }
//! ```
//!
//! # Enumerating physical devices and creating a device
//!
//! After you have created an instance, the next step is usually to enumerate the physical devices
//! that are available on the system with `Instance::enumerate_physical_devices()` (see above).
//!
//! When choosing which physical device to use, keep in mind that physical devices may or may not
//! be able to draw to a certain surface (ie. to a window or a monitor), or may even not be able
//! to draw at all. See the `swapchain` module for more information about surfaces.
//!
//! Once you have chosen a physical device, you can create a `Device` object from it. See the
//! `device` module for more info.
//!
//! # Portability subset devices and the `enumerate_portability` flag
//!
//! Certain devices, currently those on MacOS and iOS systems, do not fully conform to the Vulkan
//! specification. They are usable as normal devices, but they do not implement everything that
//! is required; some mandatory parts of Vulkan are missing. These are known as
//! "portability subset" devices.
//!
//! A portability subset device will advertise support for the
//! [`khr_portability_subset`](crate::device::DeviceExtensions::khr_portability_subset) device
//! extension. This extension must always be enabled when it is supported, and Vulkano will
//! automatically enable it when creating the device. When it is enabled, some parts of Vulkan that
//! are available in standard Vulkan will not be available by default, but they can be used by
//! enabling corresponding features when creating the device, if the device supports them.
//!
//! Because these devices are non-conformant, Vulkan programs that rely on full compliance may
//! not work (crash or have validation errors) when run on them, if they happen to use a part of
//! Vulkan that is missing from the non-conformant device. Therefore, Vulkan hides them from
//! the user by default when calling `enumerate_physical_devices` on the instance. If there are no
//! conformant devices on the system, `Instance::new` will return an `IncompatibleDriver` error.
//!
//! In order to enumerate portability subset devices, you must set the
//! [`InstanceCreateInfo::enumerate_portability`] flag when creating the instance. However, if you
//! do this, your program must be prepared to handle the non-conformant aspects of these devices,
//! and must enable the appropriate features when creating the `Device` if you intend to use them.
use self::debug::{
DebugUtilsMessengerCreateInfo, UserCallback, ValidationFeatureDisable, ValidationFeatureEnable,
};
pub use self::{extensions::InstanceExtensions, layers::LayerProperties};
use crate::{
device::physical::PhysicalDevice, instance::debug::trampoline, OomError, RequiresOneOf,
VulkanError, VulkanLibrary, VulkanObject,
};
pub use crate::{
extensions::{ExtensionRestriction, ExtensionRestrictionError},
fns::InstanceFunctions,
version::Version,
};
use smallvec::SmallVec;
use std::{
error::Error,
ffi::{c_void, CString},
fmt::{Debug, Display, Error as FmtError, Formatter},
mem::MaybeUninit,
num::NonZeroU64,
panic::{RefUnwindSafe, UnwindSafe},
ptr,
sync::Arc,
};
pub mod debug;
pub(crate) mod extensions;
mod layers;
/// An instance of a Vulkan context. This is the main object that should be created by an
/// application before everything else.
///
/// # Application and engine info
///
/// When you create an instance, you have the possibility to set information about your application
/// and its engine.
///
/// Providing this information allows for example the driver to let the user configure the driver's
/// behavior for your application alone through a control panel.
///
/// ```no_run
/// # #[macro_use] extern crate vulkano;
/// # fn main() {
/// use vulkano::{
/// instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// Version, VulkanLibrary,
/// };
///
/// let library = VulkanLibrary::new().unwrap();
/// let _instance = Instance::new(
/// library,
/// InstanceCreateInfo::application_from_cargo_toml(),
/// ).unwrap();
/// # }
/// ```
///
/// # API versions
///
/// Both an `Instance` and a [`Device`](crate::device::Device) have a highest version of the Vulkan
/// API that they support. This places a limit on what Vulkan functions and features are available
/// to use when used on a particular instance or device. It is possible for the instance and the
/// device to support different versions. The supported version for an instance can be queried
/// before creation with
/// [`VulkanLibrary::api_version`](crate::VulkanLibrary::api_version),
/// while for a device it can be retrieved with
/// [`PhysicalDevice::api_version`](crate::device::physical::PhysicalDevice::api_version).
///
/// When creating an `Instance`, you have to specify a maximum API version that you will use.
/// This restricts the API version that is available for the instance and any devices created from
/// it. For example, if both instance and device potentially support Vulkan 1.2, but you specify
/// 1.1 as the maximum API version when creating the `Instance`, then you can only use Vulkan 1.1
/// functions, even though they could theoretically support a higher version. You can think of it
/// as a promise never to use any functionality from a higher version.
///
/// The maximum API version is not a _minimum_, so it is possible to set it to a higher version than
/// what the instance or device inherently support. The final API version that you are able to use
/// on an instance or device is the lower of the supported API version and the chosen maximum API
/// version of the `Instance`.
///
/// Due to a quirk in how the Vulkan 1.0 specification was written, if the instance only
/// supports Vulkan 1.0, then it is not possible to specify a maximum API version higher than 1.0.
/// Trying to create an `Instance` will return an `IncompatibleDriver` error. Consequently, it is
/// not possible to use a higher device API version with an instance that only supports 1.0.
///
/// # Extensions
///
/// When creating an `Instance`, you must provide a list of extensions that must be enabled on the
/// newly-created instance. Trying to enable an extension that is not supported by the system will
/// result in an error.
///
/// Contrary to OpenGL, it is not possible to use the features of an extension if it was not
/// explicitly enabled.
///
/// Extensions are especially important to take into account if you want to render images on the
/// screen, as the only way to do so is to use the `VK_KHR_surface` extension. More information
/// about this in the `swapchain` module.
///
/// For example, here is how we create an instance with the `VK_KHR_surface` and
/// `VK_KHR_android_surface` extensions enabled, which will allow us to render images to an
/// Android screen. You can compile and run this code on any system, but it is highly unlikely to
/// succeed on anything else than an Android-running device.
///
/// ```no_run
/// use vulkano::{
/// instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// Version, VulkanLibrary,
/// };
///
/// let library = VulkanLibrary::new()
/// .unwrap_or_else(|err| panic!("Couldn't load Vulkan library: {:?}", err));
///
/// let extensions = InstanceExtensions {
/// khr_surface: true,
/// khr_android_surface: true,
/// .. InstanceExtensions::empty()
/// };
///
/// let instance = Instance::new(
/// library,
/// InstanceCreateInfo {
/// enabled_extensions: extensions,
/// ..Default::default()
/// },
/// )
/// .unwrap_or_else(|err| panic!("Couldn't create instance: {:?}", err));
/// ```
///
/// # Layers
///
/// When creating an `Instance`, you have the possibility to pass a list of **layers** that will
/// be activated on the newly-created instance. The list of available layers can be retrieved by
/// calling the [`layer_properties`](crate::VulkanLibrary::layer_properties) method of
/// `VulkanLibrary`.
///
/// A layer is a component that will hook and potentially modify the Vulkan function calls.
/// For example, activating a layer could add a frames-per-second counter on the screen, or it
/// could send information to a debugger that will debug your application.
///
/// > **Note**: From an application's point of view, layers "just exist". In practice, on Windows
/// > and Linux, layers can be installed by third party installers or by package managers and can
/// > also be activated by setting the value of the `VK_INSTANCE_LAYERS` environment variable
/// > before starting the program. See the documentation of the official Vulkan loader for these
/// > platforms.
///
/// > **Note**: In practice, the most common use of layers right now is for debugging purposes.
/// > To do so, you are encouraged to set the `VK_INSTANCE_LAYERS` environment variable on Windows
/// > or Linux instead of modifying the source code of your program. For example:
/// > `export VK_INSTANCE_LAYERS=VK_LAYER_LUNARG_api_dump` on Linux if you installed the Vulkan SDK
/// > will print the list of raw Vulkan function calls.
///
/// ## Examples
///
/// ```
/// # use std::{sync::Arc, error::Error};
/// # use vulkano::{
/// # instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// # Version, VulkanLibrary,
/// # };
/// # fn test() -> Result<Arc<Instance>, Box<dyn Error>> {
/// let library = VulkanLibrary::new()?;
///
/// // For the sake of the example, we activate all the layers that
/// // contain the word "foo" in their description.
/// let layers: Vec<_> = library.layer_properties()?
/// .filter(|l| l.description().contains("foo"))
/// .collect();
///
/// let instance = Instance::new(
/// library,
/// InstanceCreateInfo {
/// enabled_layers: layers.iter().map(|l| l.name().to_owned()).collect(),
/// ..Default::default()
/// },
/// )?;
/// # Ok(instance)
/// # }
/// ```
// TODO: mention that extensions must be supported by layers as well
pub struct Instance {
handle: ash::vk::Instance,
fns: InstanceFunctions,
id: NonZeroU64,
api_version: Version,
enabled_extensions: InstanceExtensions,
enabled_layers: Vec<String>,
library: Arc<VulkanLibrary>,
max_api_version: Version,
_user_callbacks: Vec<Box<UserCallback>>,
}
// TODO: fix the underlying cause instead
impl UnwindSafe for Instance {}
impl RefUnwindSafe for Instance {}
impl Instance {
/// Creates a new `Instance`.
///
/// # Panics
///
/// - Panics if any version numbers in `create_info` contain a field too large to be converted
/// into a Vulkan version number.
/// - Panics if `create_info.max_api_version` is not at least `V1_0`.
pub fn new(
library: Arc<VulkanLibrary>,
create_info: InstanceCreateInfo,
) -> Result<Arc<Instance>, InstanceCreationError> {
unsafe { Self::with_debug_utils_messengers(library, create_info, []) }
}
/// Creates a new `Instance` with debug messengers to use during the creation and destruction
/// of the instance.
///
/// The debug messengers are not used at any other time,
/// [`DebugUtilsMessenger`](crate::instance::debug::DebugUtilsMessenger) should be used for
/// that.
///
/// If `debug_utils_messengers` is not empty, the `ext_debug_utils` extension must be set in
/// `enabled_extensions`.
///
/// # Panics
///
/// - Panics if the `message_severity` or `message_type` members of any element of
/// `debug_utils_messengers` are empty.
///
/// # Safety
///
/// - The `user_callback` of each element of `debug_utils_messengers` must not make any calls
/// to the Vulkan API.
pub unsafe fn with_debug_utils_messengers(
library: Arc<VulkanLibrary>,
create_info: InstanceCreateInfo,
debug_utils_messengers: impl IntoIterator<Item = DebugUtilsMessengerCreateInfo>,
) -> Result<Arc<Instance>, InstanceCreationError> {
let InstanceCreateInfo {
application_name,
application_version,
mut enabled_extensions,
enabled_layers,
engine_name,
engine_version,
max_api_version,
enumerate_portability,
enabled_validation_features,
disabled_validation_features,
_ne: _,
} = create_info;
let (api_version, max_api_version) = {
let api_version = library.api_version();
let max_api_version = if let Some(max_api_version) = max_api_version {
max_api_version
} else if api_version < Version::V1_1 {
api_version
} else {
Version::HEADER_VERSION
};
(std::cmp::min(max_api_version, api_version), max_api_version)
};
// VUID-VkApplicationInfo-apiVersion-04010
assert!(max_api_version >= Version::V1_0);
let supported_extensions =
library.supported_extensions_with_layers(enabled_layers.iter().map(String::as_str))?;
let mut flags = ash::vk::InstanceCreateFlags::empty();
if enumerate_portability && supported_extensions.khr_portability_enumeration {
enabled_extensions.khr_portability_enumeration = true;
flags |= ash::vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR;
}
// Check if the extensions are correct
enabled_extensions.check_requirements(&supported_extensions, api_version)?;
// FIXME: check whether each layer is supported
let enabled_layers_cstr: Vec<CString> = enabled_layers
.iter()
.map(|name| CString::new(name.clone()).unwrap())
.collect();
let enabled_layers_ptrs = enabled_layers_cstr
.iter()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let enabled_extensions_cstr: Vec<CString> = (&enabled_extensions).into();
let enabled_extensions_ptrs = enabled_extensions_cstr
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let application_name_cstr = application_name.map(|name| CString::new(name).unwrap());
let engine_name_cstr = engine_name.map(|name| CString::new(name).unwrap());
let application_info = ash::vk::ApplicationInfo {
p_application_name: application_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
application_version: application_version
.try_into()
.expect("Version out of range"),
p_engine_name: engine_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
engine_version: engine_version.try_into().expect("Version out of range"),
api_version: max_api_version.try_into().expect("Version out of range"),
..Default::default()
};
let enable_validation_features_vk: SmallVec<[_; 5]> = enabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let disable_validation_features_vk: SmallVec<[_; 8]> = disabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let mut create_info_vk = ash::vk::InstanceCreateInfo {
flags,
p_application_info: &application_info,
enabled_layer_count: enabled_layers_ptrs.len() as u32,
pp_enabled_layer_names: enabled_layers_ptrs.as_ptr(),
enabled_extension_count: enabled_extensions_ptrs.len() as u32,
pp_enabled_extension_names: enabled_extensions_ptrs.as_ptr(),
..Default::default()
};
let mut validation_features_vk = None;
if !enabled_validation_features.is_empty() || !disabled_validation_features.is_empty() {
if !enabled_extensions.ext_validation_features {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.enabled_validation_features` or \
`create_info.disabled_validation_features` are not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_validation_features"],
..Default::default()
},
});
}
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02967
assert!(
!enabled_validation_features
.contains(&ValidationFeatureEnable::GpuAssistedReserveBindingSlot)
|| enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted)
);
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02968
assert!(
!(enabled_validation_features.contains(&ValidationFeatureEnable::DebugPrintf)
&& enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted))
);
let next = validation_features_vk.insert(ash::vk::ValidationFeaturesEXT {
enabled_validation_feature_count: enable_validation_features_vk.len() as u32,
p_enabled_validation_features: enable_validation_features_vk.as_ptr(),
disabled_validation_feature_count: disable_validation_features_vk.len() as u32,
p_disabled_validation_features: disable_validation_features_vk.as_ptr(),
..Default::default()
});
next.p_next = create_info_vk.p_next;
create_info_vk.p_next = next as *const _ as *const _;
}
// Handle debug messengers
let debug_utils_messengers = debug_utils_messengers.into_iter();
let mut debug_utils_messenger_create_infos =
Vec::with_capacity(debug_utils_messengers.size_hint().0);
let mut user_callbacks = Vec::with_capacity(debug_utils_messengers.size_hint().0);
for create_info in debug_utils_messengers {
let DebugUtilsMessengerCreateInfo {
message_type,
message_severity,
user_callback,
_ne: _,
} = create_info;
// VUID-VkInstanceCreateInfo-pNext-04926
if !enabled_extensions.ext_debug_utils {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.debug_utils_messengers` is not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_debug_utils"],
..Default::default()
},
});
}
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-parameter
// TODO: message_severity.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-requiredbitmask
assert!(!message_severity.is_empty());
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-parameter
// TODO: message_type.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-requiredbitmask
assert!(!message_type.is_empty());
// VUID-PFN_vkDebugUtilsMessengerCallbackEXT-None-04769
// Can't be checked, creation is unsafe.
let user_callback = Box::new(user_callback);
let create_info = ash::vk::DebugUtilsMessengerCreateInfoEXT {
flags: ash::vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: message_severity.into(),
message_type: message_type.into(),
pfn_user_callback: Some(trampoline),
p_user_data: &*user_callback as &Arc<_> as *const Arc<_> as *const c_void as *mut _,
..Default::default()
};
debug_utils_messenger_create_infos.push(create_info);
user_callbacks.push(user_callback);
}
for i in 1..debug_utils_messenger_create_infos.len() {
debug_utils_messenger_create_infos[i - 1].p_next =
&debug_utils_messenger_create_infos[i] as *const _ as *const _;
}
if let Some(info) = debug_utils_messenger_create_infos.first() {
create_info_vk.p_next = info as *const _ as *const _;
}
// Creating the Vulkan instance.
let handle = {
let mut output = MaybeUninit::uninit();
let fns = library.fns();
(fns.v1_0.create_instance)(&create_info_vk, ptr::null(), output.as_mut_ptr())
.result()
.map_err(VulkanError::from)?;
output.assume_init()
};
// Loading the function pointers of the newly-created instance.
let fns = {
InstanceFunctions::load(|name| {
library
.get_instance_proc_addr(handle, name.as_ptr())
.map_or(ptr::null(), |func| func as _)
})
};
Ok(Arc::new(Instance {
handle,
fns,
id: Self::next_id(),
api_version,
enabled_extensions,
enabled_layers,
library,
max_api_version,
_user_callbacks: user_callbacks,
}))
} | pub fn library(&self) -> &Arc<VulkanLibrary> {
&self.library
}
/// Returns the Vulkan version supported by the instance.
///
/// This is the lower of the
/// [driver's supported version](crate::VulkanLibrary::api_version) and
/// [`max_api_version`](Instance::max_api_version).
#[inline]
pub fn api_version(&self) -> Version {
self.api_version
}
/// Returns the maximum Vulkan version that was specified when creating the instance.
#[inline]
pub fn max_api_version(&self) -> Version {
self.max_api_version
}
/// Returns pointers to the raw Vulkan functions of the instance.
#[inline]
pub fn fns(&self) -> &InstanceFunctions {
&self.fns
}
/// Returns the extensions that have been enabled on the instance.
#[inline]
pub fn enabled_extensions(&self) -> &InstanceExtensions {
&self.enabled_extensions
}
/// Returns the layers that have been enabled on the instance.
#[inline]
pub fn enabled_layers(&self) -> &[String] {
&self.enabled_layers
}
/// Returns an iterator that enumerates the physical devices available.
///
/// # Examples
///
/// ```no_run
/// # use vulkano::{
/// # instance::{Instance, InstanceExtensions},
/// # Version, VulkanLibrary,
/// # };
///
/// # let library = VulkanLibrary::new().unwrap();
/// # let instance = Instance::new(library, Default::default()).unwrap();
/// for physical_device in instance.enumerate_physical_devices().unwrap() {
/// println!("Available device: {}", physical_device.properties().device_name);
/// }
/// ```
pub fn enumerate_physical_devices(
self: &Arc<Self>,
) -> Result<impl ExactSizeIterator<Item = Arc<PhysicalDevice>>, VulkanError> {
let fns = self.fns();
unsafe {
let handles = loop {
let mut count = 0;
(fns.v1_0.enumerate_physical_devices)(self.handle, &mut count, ptr::null_mut())
.result()
.map_err(VulkanError::from)?;
let mut handles = Vec::with_capacity(count as usize);
let result = (fns.v1_0.enumerate_physical_devices)(
self.handle,
&mut count,
handles.as_mut_ptr(),
);
match result {
ash::vk::Result::SUCCESS => {
handles.set_len(count as usize);
break handles;
}
ash::vk::Result::INCOMPLETE => (),
err => return Err(VulkanError::from(err)),
}
};
let physical_devices: SmallVec<[_; 4]> = handles
.into_iter()
.map(|handle| PhysicalDevice::from_handle(self.clone(), handle))
.collect::<Result<_, _>>()?;
Ok(physical_devices.into_iter())
}
}
}
impl Drop for Instance {
#[inline]
fn drop(&mut self) {
let fns = self.fns();
unsafe {
(fns.v1_0.destroy_instance)(self.handle, ptr::null());
}
}
}
unsafe impl VulkanObject for Instance {
type Handle = ash::vk::Instance;
#[inline]
fn handle(&self) -> Self::Handle {
self.handle
}
}
crate::impl_id_counter!(Instance);
impl Debug for Instance {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
let Self {
handle,
fns,
id: _,
api_version,
enabled_extensions,
enabled_layers,
library: function_pointers,
max_api_version,
_user_callbacks: _,
} = self;
f.debug_struct("Instance")
.field("handle", handle)
.field("fns", fns)
.field("api_version", api_version)
.field("enabled_extensions", enabled_extensions)
.field("enabled_layers", enabled_layers)
.field("function_pointers", function_pointers)
.field("max_api_version", max_api_version)
.finish_non_exhaustive()
}
}
/// Parameters to create a new `Instance`.
#[derive(Debug)]
pub struct InstanceCreateInfo {
/// A string of your choice stating the name of your application.
///
/// The default value is `None`.
pub application_name: Option<String>,
/// A version number of your choice specifying the version of your application.
///
/// The default value is zero.
pub application_version: Version,
/// The extensions to enable on the instance.
///
/// The default value is [`InstanceExtensions::empty()`].
pub enabled_extensions: InstanceExtensions,
/// The layers to enable on the instance.
///
/// The default value is empty.
pub enabled_layers: Vec<String>,
/// A string of your choice stating the name of the engine used to power the application.
pub engine_name: Option<String>,
/// A version number of your choice specifying the version of the engine used to power the
/// application.
///
/// The default value is zero.
pub engine_version: Version,
/// The highest Vulkan API version that the application will use with the instance.
///
/// Usually, you will want to leave this at the default.
///
/// The default value is [`Version::HEADER_VERSION`], but if the
/// supported instance version is 1.0, then it will be 1.0.
pub max_api_version: Option<Version>,
/// Include [portability subset](crate::instance#portability-subset-devices-and-the-enumerate_portability-flag)
/// devices when enumerating physical devices.
///
/// If you enable this flag, you must ensure that your program is prepared to handle the
/// non-conformant aspects of these devices.
///
/// If this flag is not enabled, and there are no fully-conformant devices on the system, then
/// [`Instance::new`] will return an `IncompatibleDriver` error.
///
/// The default value is `false`.
///
/// # Notes
///
/// If this flag is enabled, and the
/// [`khr_portability_enumeration`](crate::instance::InstanceExtensions::khr_portability_enumeration)
/// extension is supported, it will be enabled automatically when creating the instance.
/// If the extension is not supported, this flag will be ignored.
pub enumerate_portability: bool,
/// Features of the validation layer to enable.
///
/// If not empty, the
/// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
/// extension must be enabled on the instance.
pub enabled_validation_features: Vec<ValidationFeatureEnable>,
/// Features of the validation layer to disable.
///
/// If not empty, the
/// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
/// extension must be enabled on the instance.
pub disabled_validation_features: Vec<ValidationFeatureDisable>,
pub _ne: crate::NonExhaustive,
}
impl Default for InstanceCreateInfo {
#[inline]
fn default() -> Self {
Self {
application_name: None,
application_version: Version::major_minor(0, 0),
enabled_extensions: InstanceExtensions::empty(),
enabled_layers: Vec::new(),
engine_name: None,
engine_version: Version::major_minor(0, 0),
max_api_version: None,
enumerate_portability: false,
enabled_validation_features: Vec::new(),
disabled_validation_features: Vec::new(),
_ne: crate::NonExhaustive(()),
}
}
}
impl InstanceCreateInfo {
/// Returns an `InstanceCreateInfo` with the `application_name` and `application_version` set
/// from information in your crate's Cargo.toml file.
///
/// # Panics
///
/// - Panics if the required environment variables are missing, which happens if the project
/// wasn't built by Cargo.
#[inline]
pub fn application_from_cargo_toml() -> Self {
Self {
application_name: Some(env!("CARGO_PKG_NAME").to_owned()),
application_version: Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
},
..Default::default()
}
}
}
/// Error that can happen when creating an instance.
#[derive(Clone, Debug)]
pub enum InstanceCreationError {
/// Not enough memory.
OomError(OomError),
/// Failed to initialize for an implementation-specific reason.
InitializationFailed,
/// One of the requested layers is missing.
LayerNotPresent,
/// One of the requested extensions is not supported by the implementation.
ExtensionNotPresent,
/// The version requested is not supported by the implementation.
IncompatibleDriver,
/// A restriction for an extension was not met.
ExtensionRestrictionNotMet(ExtensionRestrictionError),
RequirementNotMet {
required_for: &'static str,
requires_one_of: RequiresOneOf,
},
}
impl Error for InstanceCreationError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::OomError(err) => Some(err),
_ => None,
}
}
}
impl Display for InstanceCreationError {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
match self {
Self::OomError(_) => write!(f, "not enough memory available"),
Self::InitializationFailed => write!(f, "initialization failed"),
Self::LayerNotPresent => write!(f, "layer not present"),
Self::ExtensionNotPresent => write!(f, "extension not present"),
Self::IncompatibleDriver => write!(f, "incompatible driver"),
Self::ExtensionRestrictionNotMet(err) => Display::fmt(err, f),
Self::RequirementNotMet {
required_for,
requires_one_of,
} => write!(
f,
"a requirement was not met for: {}; requires one of: {}",
required_for, requires_one_of,
),
}
}
}
impl From<OomError> for InstanceCreationError {
fn from(err: OomError) -> Self {
Self::OomError(err)
}
}
impl From<ExtensionRestrictionError> for InstanceCreationError {
fn from(err: ExtensionRestrictionError) -> Self {
Self::ExtensionRestrictionNotMet(err)
}
}
impl From<VulkanError> for InstanceCreationError {
fn from(err: VulkanError) -> Self {
match err {
err @ VulkanError::OutOfHostMemory => Self::OomError(OomError::from(err)),
err @ VulkanError::OutOfDeviceMemory => Self::OomError(OomError::from(err)),
VulkanError::InitializationFailed => Self::InitializationFailed,
VulkanError::LayerNotPresent => Self::LayerNotPresent,
VulkanError::ExtensionNotPresent => Self::ExtensionNotPresent,
VulkanError::IncompatibleDriver => Self::IncompatibleDriver,
_ => panic!("unexpected error: {:?}", err),
}
}
}
#[cfg(test)]
mod tests {
#[test]
fn create_instance() {
let _ = instance!();
}
} |
/// Returns the Vulkan library used to create this instance.
#[inline] | random_line_split |
mod.rs | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! API entry point.
//!
//! The first thing to do after loading the Vulkan library is to create an `Instance` object.
//!
//! For example:
//!
//! ```no_run
//! use vulkano::{
//! instance::{Instance, InstanceExtensions},
//! Version, VulkanLibrary,
//! };
//!
//! let library = VulkanLibrary::new()
//! .unwrap_or_else(|err| panic!("Couldn't load Vulkan library: {:?}", err));
//! let instance = Instance::new(library, Default::default())
//! .unwrap_or_else(|err| panic!("Couldn't create instance: {:?}", err));
//! ```
//!
//! Creating an instance initializes everything and allows you to enumerate physical devices,
//! ie. all the Vulkan implementations that are available on the system.
//!
//! ```no_run
//! # use vulkano::{
//! # instance::{Instance, InstanceExtensions},
//! # Version, VulkanLibrary,
//! # };
//! use vulkano::device::physical::PhysicalDevice;
//!
//! # let library = VulkanLibrary::new().unwrap();
//! # let instance = Instance::new(library, Default::default()).unwrap();
//! for physical_device in instance.enumerate_physical_devices().unwrap() {
//! println!("Available device: {}", physical_device.properties().device_name);
//! }
//! ```
//!
//! # Enumerating physical devices and creating a device
//!
//! After you have created an instance, the next step is usually to enumerate the physical devices
//! that are available on the system with `Instance::enumerate_physical_devices()` (see above).
//!
//! When choosing which physical device to use, keep in mind that physical devices may or may not
//! be able to draw to a certain surface (ie. to a window or a monitor), or may even not be able
//! to draw at all. See the `swapchain` module for more information about surfaces.
//!
//! Once you have chosen a physical device, you can create a `Device` object from it. See the
//! `device` module for more info.
//!
//! # Portability subset devices and the `enumerate_portability` flag
//!
//! Certain devices, currently those on MacOS and iOS systems, do not fully conform to the Vulkan
//! specification. They are usable as normal devices, but they do not implement everything that
//! is required; some mandatory parts of Vulkan are missing. These are known as
//! "portability subset" devices.
//!
//! A portability subset device will advertise support for the
//! [`khr_portability_subset`](crate::device::DeviceExtensions::khr_portability_subset) device
//! extension. This extension must always be enabled when it is supported, and Vulkano will
//! automatically enable it when creating the device. When it is enabled, some parts of Vulkan that
//! are available in standard Vulkan will not be available by default, but they can be used by
//! enabling corresponding features when creating the device, if the device supports them.
//!
//! Because these devices are non-conformant, Vulkan programs that rely on full compliance may
//! not work (crash or have validation errors) when run on them, if they happen to use a part of
//! Vulkan that is missing from the non-conformant device. Therefore, Vulkan hides them from
//! the user by default when calling `enumerate_physical_devices` on the instance. If there are no
//! conformant devices on the system, `Instance::new` will return an `IncompatibleDriver` error.
//!
//! In order to enumerate portability subset devices, you must set the
//! [`InstanceCreateInfo::enumerate_portability`] flag when creating the instance. However, if you
//! do this, your program must be prepared to handle the non-conformant aspects of these devices,
//! and must enable the appropriate features when creating the `Device` if you intend to use them.
use self::debug::{
DebugUtilsMessengerCreateInfo, UserCallback, ValidationFeatureDisable, ValidationFeatureEnable,
};
pub use self::{extensions::InstanceExtensions, layers::LayerProperties};
use crate::{
device::physical::PhysicalDevice, instance::debug::trampoline, OomError, RequiresOneOf,
VulkanError, VulkanLibrary, VulkanObject,
};
pub use crate::{
extensions::{ExtensionRestriction, ExtensionRestrictionError},
fns::InstanceFunctions,
version::Version,
};
use smallvec::SmallVec;
use std::{
error::Error,
ffi::{c_void, CString},
fmt::{Debug, Display, Error as FmtError, Formatter},
mem::MaybeUninit,
num::NonZeroU64,
panic::{RefUnwindSafe, UnwindSafe},
ptr,
sync::Arc,
};
pub mod debug;
pub(crate) mod extensions;
mod layers;
/// An instance of a Vulkan context. This is the main object that should be created by an
/// application before everything else.
///
/// # Application and engine info
///
/// When you create an instance, you have the possibility to set information about your application
/// and its engine.
///
/// Providing this information allows for example the driver to let the user configure the driver's
/// behavior for your application alone through a control panel.
///
/// ```no_run
/// # #[macro_use] extern crate vulkano;
/// # fn main() {
/// use vulkano::{
/// instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// Version, VulkanLibrary,
/// };
///
/// let library = VulkanLibrary::new().unwrap();
/// let _instance = Instance::new(
/// library,
/// InstanceCreateInfo::application_from_cargo_toml(),
/// ).unwrap();
/// # }
/// ```
///
/// # API versions
///
/// Both an `Instance` and a [`Device`](crate::device::Device) have a highest version of the Vulkan
/// API that they support. This places a limit on what Vulkan functions and features are available
/// to use when used on a particular instance or device. It is possible for the instance and the
/// device to support different versions. The supported version for an instance can be queried
/// before creation with
/// [`VulkanLibrary::api_version`](crate::VulkanLibrary::api_version),
/// while for a device it can be retrieved with
/// [`PhysicalDevice::api_version`](crate::device::physical::PhysicalDevice::api_version).
///
/// When creating an `Instance`, you have to specify a maximum API version that you will use.
/// This restricts the API version that is available for the instance and any devices created from
/// it. For example, if both instance and device potentially support Vulkan 1.2, but you specify
/// 1.1 as the maximum API version when creating the `Instance`, then you can only use Vulkan 1.1
/// functions, even though they could theoretically support a higher version. You can think of it
/// as a promise never to use any functionality from a higher version.
///
/// The maximum API version is not a _minimum_, so it is possible to set it to a higher version than
/// what the instance or device inherently support. The final API version that you are able to use
/// on an instance or device is the lower of the supported API version and the chosen maximum API
/// version of the `Instance`.
///
/// Due to a quirk in how the Vulkan 1.0 specification was written, if the instance only
/// supports Vulkan 1.0, then it is not possible to specify a maximum API version higher than 1.0.
/// Trying to create an `Instance` will return an `IncompatibleDriver` error. Consequently, it is
/// not possible to use a higher device API version with an instance that only supports 1.0.
///
/// # Extensions
///
/// When creating an `Instance`, you must provide a list of extensions that must be enabled on the
/// newly-created instance. Trying to enable an extension that is not supported by the system will
/// result in an error.
///
/// Contrary to OpenGL, it is not possible to use the features of an extension if it was not
/// explicitly enabled.
///
/// Extensions are especially important to take into account if you want to render images on the
/// screen, as the only way to do so is to use the `VK_KHR_surface` extension. More information
/// about this in the `swapchain` module.
///
/// For example, here is how we create an instance with the `VK_KHR_surface` and
/// `VK_KHR_android_surface` extensions enabled, which will allow us to render images to an
/// Android screen. You can compile and run this code on any system, but it is highly unlikely to
/// succeed on anything else than an Android-running device.
///
/// ```no_run
/// use vulkano::{
/// instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// Version, VulkanLibrary,
/// };
///
/// let library = VulkanLibrary::new()
/// .unwrap_or_else(|err| panic!("Couldn't load Vulkan library: {:?}", err));
///
/// let extensions = InstanceExtensions {
/// khr_surface: true,
/// khr_android_surface: true,
/// .. InstanceExtensions::empty()
/// };
///
/// let instance = Instance::new(
/// library,
/// InstanceCreateInfo {
/// enabled_extensions: extensions,
/// ..Default::default()
/// },
/// )
/// .unwrap_or_else(|err| panic!("Couldn't create instance: {:?}", err));
/// ```
///
/// # Layers
///
/// When creating an `Instance`, you have the possibility to pass a list of **layers** that will
/// be activated on the newly-created instance. The list of available layers can be retrieved by
/// calling the [`layer_properties`](crate::VulkanLibrary::layer_properties) method of
/// `VulkanLibrary`.
///
/// A layer is a component that will hook and potentially modify the Vulkan function calls.
/// For example, activating a layer could add a frames-per-second counter on the screen, or it
/// could send information to a debugger that will debug your application.
///
/// > **Note**: From an application's point of view, layers "just exist". In practice, on Windows
/// > and Linux, layers can be installed by third party installers or by package managers and can
/// > also be activated by setting the value of the `VK_INSTANCE_LAYERS` environment variable
/// > before starting the program. See the documentation of the official Vulkan loader for these
/// > platforms.
///
/// > **Note**: In practice, the most common use of layers right now is for debugging purposes.
/// > To do so, you are encouraged to set the `VK_INSTANCE_LAYERS` environment variable on Windows
/// > or Linux instead of modifying the source code of your program. For example:
/// > `export VK_INSTANCE_LAYERS=VK_LAYER_LUNARG_api_dump` on Linux if you installed the Vulkan SDK
/// > will print the list of raw Vulkan function calls.
///
/// ## Examples
///
/// ```
/// # use std::{sync::Arc, error::Error};
/// # use vulkano::{
/// # instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// # Version, VulkanLibrary,
/// # };
/// # fn test() -> Result<Arc<Instance>, Box<dyn Error>> {
/// let library = VulkanLibrary::new()?;
///
/// // For the sake of the example, we activate all the layers that
/// // contain the word "foo" in their description.
/// let layers: Vec<_> = library.layer_properties()?
/// .filter(|l| l.description().contains("foo"))
/// .collect();
///
/// let instance = Instance::new(
/// library,
/// InstanceCreateInfo {
/// enabled_layers: layers.iter().map(|l| l.name().to_owned()).collect(),
/// ..Default::default()
/// },
/// )?;
/// # Ok(instance)
/// # }
/// ```
// TODO: mention that extensions must be supported by layers as well
pub struct Instance {
    // Raw Vulkan handle; owned, destroyed in `Drop`.
    handle: ash::vk::Instance,
    // Function pointers loaded for this specific instance.
    fns: InstanceFunctions,
    // Unique id issued by `impl_id_counter!` (see below).
    id: NonZeroU64,
    // Effective API version: min(library version, `max_api_version`).
    api_version: Version,
    // Extensions actually enabled (may include auto-enabled ones such as
    // `khr_portability_enumeration`).
    enabled_extensions: InstanceExtensions,
    enabled_layers: Vec<String>,
    // Keeps the loaded Vulkan library alive for as long as the instance.
    library: Arc<VulkanLibrary>,
    max_api_version: Version,
    // Kept alive because Vulkan holds raw `p_user_data` pointers into these
    // boxes for the creation/destruction debug messengers.
    _user_callbacks: Vec<Box<UserCallback>>,
}
// Manually assert unwind safety. Some field of `Instance` (or of a type it
// contains) is not automatically `UnwindSafe`/`RefUnwindSafe`; these blanket
// assertions paper over that so `Instance` can cross `catch_unwind`
// boundaries.
// TODO: fix the underlying cause instead
impl UnwindSafe for Instance {}
impl RefUnwindSafe for Instance {}
impl Instance {
    /// Creates a new `Instance`.
    ///
    /// # Panics
    ///
    /// - Panics if any version numbers in `create_info` contain a field too large to be converted
    ///   into a Vulkan version number.
    /// - Panics if `create_info.max_api_version` is not at least `V1_0`.
    pub fn new(
        library: Arc<VulkanLibrary>,
        create_info: InstanceCreateInfo,
    ) -> Result<Arc<Instance>, InstanceCreationError> {
        // SAFETY: no debug messengers are passed, so the "callback must not
        // call into Vulkan" contract is trivially satisfied.
        unsafe { Self::with_debug_utils_messengers(library, create_info, []) }
    }

    /// Creates a new `Instance` with debug messengers to use during the creation and destruction
    /// of the instance.
    ///
    /// The debug messengers are not used at any other time,
    /// [`DebugUtilsMessenger`](crate::instance::debug::DebugUtilsMessenger) should be used for
    /// that.
    ///
    /// If `debug_utils_messengers` is not empty, the `ext_debug_utils` extension must be set in
    /// `enabled_extensions`.
    ///
    /// # Panics
    ///
    /// - Panics if the `message_severity` or `message_type` members of any element of
    ///   `debug_utils_messengers` are empty.
    ///
    /// # Safety
    ///
    /// - The `user_callback` of each element of `debug_utils_messengers` must not make any calls
    ///   to the Vulkan API.
    pub unsafe fn with_debug_utils_messengers(
        library: Arc<VulkanLibrary>,
        create_info: InstanceCreateInfo,
        debug_utils_messengers: impl IntoIterator<Item = DebugUtilsMessengerCreateInfo>,
    ) -> Result<Arc<Instance>, InstanceCreationError> {
        let InstanceCreateInfo {
            application_name,
            application_version,
            mut enabled_extensions,
            enabled_layers,
            engine_name,
            engine_version,
            max_api_version,
            enumerate_portability,
            enabled_validation_features,
            disabled_validation_features,
            _ne: _,
        } = create_info;

        // Resolve the versions: if the caller gave no maximum, default to the
        // ash header version — except on a 1.0-only library, where requesting
        // anything above 1.0 would be rejected by the driver. The effective
        // `api_version` is the lower of the two.
        let (api_version, max_api_version) = {
            let api_version = library.api_version();
            let max_api_version = if let Some(max_api_version) = max_api_version {
                max_api_version
            } else if api_version < Version::V1_1 {
                api_version
            } else {
                Version::HEADER_VERSION
            };
            (std::cmp::min(max_api_version, api_version), max_api_version)
        };

        // VUID-VkApplicationInfo-apiVersion-04010
        assert!(max_api_version >= Version::V1_0);
        let supported_extensions =
            library.supported_extensions_with_layers(enabled_layers.iter().map(String::as_str))?;
        let mut flags = ash::vk::InstanceCreateFlags::empty();

        // Opt into enumerating portability-subset devices, but only when the
        // extension is actually supported; otherwise the flag is ignored.
        if enumerate_portability && supported_extensions.khr_portability_enumeration {
            enabled_extensions.khr_portability_enumeration = true;
            flags |= ash::vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR;
        }

        // Check if the extensions are correct
        enabled_extensions.check_requirements(&supported_extensions, api_version)?;

        // FIXME: check whether each layer is supported
        // Build NUL-terminated copies of the layer/extension names; the
        // `_cstr` vectors must outlive the `create_instance` call because the
        // `_ptrs` vectors point into them.
        let enabled_layers_cstr: Vec<CString> = enabled_layers
            .iter()
            .map(|name| CString::new(name.clone()).unwrap())
            .collect();
        let enabled_layers_ptrs = enabled_layers_cstr
            .iter()
            .map(|layer| layer.as_ptr())
            .collect::<SmallVec<[_; 2]>>();
        let enabled_extensions_cstr: Vec<CString> = (&enabled_extensions).into();
        let enabled_extensions_ptrs = enabled_extensions_cstr
            .iter()
            .map(|extension| extension.as_ptr())
            .collect::<SmallVec<[_; 2]>>();
        let application_name_cstr = application_name.map(|name| CString::new(name).unwrap());
        let engine_name_cstr = engine_name.map(|name| CString::new(name).unwrap());

        let application_info = ash::vk::ApplicationInfo {
            p_application_name: application_name_cstr
                .as_ref()
                .map(|s| s.as_ptr())
                .unwrap_or(ptr::null()),
            application_version: application_version
                .try_into()
                .expect("Version out of range"),
            p_engine_name: engine_name_cstr
                .as_ref()
                .map(|s| s.as_ptr())
                .unwrap_or(ptr::null()),
            engine_version: engine_version.try_into().expect("Version out of range"),
            api_version: max_api_version.try_into().expect("Version out of range"),
            ..Default::default()
        };
        let enable_validation_features_vk: SmallVec<[_; 5]> = enabled_validation_features
            .iter()
            .copied()
            .map(Into::into)
            .collect();
        let disable_validation_features_vk: SmallVec<[_; 8]> = disabled_validation_features
            .iter()
            .copied()
            .map(Into::into)
            .collect();
        let mut create_info_vk = ash::vk::InstanceCreateInfo {
            flags,
            p_application_info: &application_info,
            enabled_layer_count: enabled_layers_ptrs.len() as u32,
            pp_enabled_layer_names: enabled_layers_ptrs.as_ptr(),
            enabled_extension_count: enabled_extensions_ptrs.len() as u32,
            pp_enabled_extension_names: enabled_extensions_ptrs.as_ptr(),
            ..Default::default()
        };
        let mut validation_features_vk = None;

        if !enabled_validation_features.is_empty() || !disabled_validation_features.is_empty() {
            if !enabled_extensions.ext_validation_features {
                return Err(InstanceCreationError::RequirementNotMet {
                    required_for: "`create_info.enabled_validation_features` or \
                        `create_info.disabled_validation_features` are not empty",
                    requires_one_of: RequiresOneOf {
                        instance_extensions: &["ext_validation_features"],
                        ..Default::default()
                    },
                });
            }

            // VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02967
            assert!(
                !enabled_validation_features
                    .contains(&ValidationFeatureEnable::GpuAssistedReserveBindingSlot)
                    || enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted)
            );

            // VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02968
            assert!(
                !(enabled_validation_features.contains(&ValidationFeatureEnable::DebugPrintf)
                    && enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted))
            );

            // Splice the validation-features struct into the front of the
            // pNext chain. `Option::insert` returns a reference into
            // `validation_features_vk`, which lives until after the
            // `create_instance` call below.
            let next = validation_features_vk.insert(ash::vk::ValidationFeaturesEXT {
                enabled_validation_feature_count: enable_validation_features_vk.len() as u32,
                p_enabled_validation_features: enable_validation_features_vk.as_ptr(),
                disabled_validation_feature_count: disable_validation_features_vk.len() as u32,
                p_disabled_validation_features: disable_validation_features_vk.as_ptr(),
                ..Default::default()
            });
            next.p_next = create_info_vk.p_next;
            create_info_vk.p_next = next as *const _ as *const _;
        }

        // Handle debug messengers
        let debug_utils_messengers = debug_utils_messengers.into_iter();
        let mut debug_utils_messenger_create_infos =
            Vec::with_capacity(debug_utils_messengers.size_hint().0);
        let mut user_callbacks = Vec::with_capacity(debug_utils_messengers.size_hint().0);

        for create_info in debug_utils_messengers {
            let DebugUtilsMessengerCreateInfo {
                message_type,
                message_severity,
                user_callback,
                _ne: _,
            } = create_info;

            // VUID-VkInstanceCreateInfo-pNext-04926
            if !enabled_extensions.ext_debug_utils {
                return Err(InstanceCreationError::RequirementNotMet {
                    required_for: "`create_info.debug_utils_messengers` is not empty",
                    requires_one_of: RequiresOneOf {
                        instance_extensions: &["ext_debug_utils"],
                        ..Default::default()
                    },
                });
            }

            // VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-parameter
            // TODO: message_severity.validate_instance()?;

            // VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-requiredbitmask
            assert!(!message_severity.is_empty());

            // VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-parameter
            // TODO: message_type.validate_instance()?;

            // VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-requiredbitmask
            assert!(!message_type.is_empty());

            // VUID-PFN_vkDebugUtilsMessengerCallbackEXT-None-04769
            // Can't be checked, creation is unsafe.

            // Box the callback so its address stays stable while Vulkan holds
            // the raw `p_user_data` pointer to it (the cast shows the boxed
            // value is an `Arc`, passed to the C `trampoline` as *mut c_void).
            let user_callback = Box::new(user_callback);
            let create_info = ash::vk::DebugUtilsMessengerCreateInfoEXT {
                flags: ash::vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
                message_severity: message_severity.into(),
                message_type: message_type.into(),
                pfn_user_callback: Some(trampoline),
                p_user_data: &*user_callback as &Arc<_> as *const Arc<_> as *const c_void as *mut _,
                ..Default::default()
            };
            debug_utils_messenger_create_infos.push(create_info);
            user_callbacks.push(user_callback);
        }

        // Link the messenger create-infos into a pNext chain and attach its
        // head to the instance create info. This runs only after all pushes,
        // so the `Vec` no longer reallocates and element addresses are stable.
        for i in 1..debug_utils_messenger_create_infos.len() {
            debug_utils_messenger_create_infos[i - 1].p_next =
                &debug_utils_messenger_create_infos[i] as *const _ as *const _;
        }
        if let Some(info) = debug_utils_messenger_create_infos.first() {
            create_info_vk.p_next = info as *const _ as *const _;
        }

        // Creating the Vulkan instance.
        let handle = {
            let mut output = MaybeUninit::uninit();
            let fns = library.fns();
            (fns.v1_0.create_instance)(&create_info_vk, ptr::null(), output.as_mut_ptr())
                .result()
                .map_err(VulkanError::from)?;
            // Only reached on VK_SUCCESS, so the driver has written `output`.
            output.assume_init()
        };

        // Loading the function pointers of the newly-created instance.
        let fns = {
            InstanceFunctions::load(|name| {
                library
                    .get_instance_proc_addr(handle, name.as_ptr())
                    .map_or(ptr::null(), |func| func as _)
            })
        };

        Ok(Arc::new(Instance {
            handle,
            fns,
            id: Self::next_id(),
            api_version,
            enabled_extensions,
            enabled_layers,
            library,
            max_api_version,
            _user_callbacks: user_callbacks,
        }))
    }

    /// Returns the Vulkan library used to create this instance.
    #[inline]
    pub fn library(&self) -> &Arc<VulkanLibrary> {
        &self.library
    }

    /// Returns the Vulkan version supported by the instance.
    ///
    /// This is the lower of the
    /// [driver's supported version](crate::VulkanLibrary::api_version) and
    /// [`max_api_version`](Instance::max_api_version).
    #[inline]
    pub fn api_version(&self) -> Version {
        self.api_version
    }

    /// Returns the maximum Vulkan version that was specified when creating the instance.
    #[inline]
    pub fn max_api_version(&self) -> Version {
        self.max_api_version
    }

    /// Returns pointers to the raw Vulkan functions of the instance.
    #[inline]
    pub fn fns(&self) -> &InstanceFunctions {
        &self.fns
    }

    /// Returns the extensions that have been enabled on the instance.
    #[inline]
    pub fn enabled_extensions(&self) -> &InstanceExtensions {
        &self.enabled_extensions
    }

    /// Returns the layers that have been enabled on the instance.
    #[inline]
    pub fn enabled_layers(&self) -> &[String] {
        &self.enabled_layers
    }

    /// Returns an iterator that enumerates the physical devices available.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use vulkano::{
    /// #     instance::{Instance, InstanceExtensions},
    /// #     Version, VulkanLibrary,
    /// # };
    ///
    /// # let library = VulkanLibrary::new().unwrap();
    /// # let instance = Instance::new(library, Default::default()).unwrap();
    /// for physical_device in instance.enumerate_physical_devices().unwrap() {
    ///     println!("Available device: {}", physical_device.properties().device_name);
    /// }
    /// ```
    pub fn enumerate_physical_devices(
        self: &Arc<Self>,
    ) -> Result<impl ExactSizeIterator<Item = Arc<PhysicalDevice>>, VulkanError> {
        let fns = self.fns();

        unsafe {
            // Standard two-call Vulkan enumeration: query the count, then
            // fill a buffer of that size. Retried in a loop because the
            // device count can change between the two calls, in which case
            // the driver returns `INCOMPLETE`.
            let handles = loop {
                let mut count = 0;
                (fns.v1_0.enumerate_physical_devices)(self.handle, &mut count, ptr::null_mut())
                    .result()
                    .map_err(VulkanError::from)?;

                let mut handles = Vec::with_capacity(count as usize);
                let result = (fns.v1_0.enumerate_physical_devices)(
                    self.handle,
                    &mut count,
                    handles.as_mut_ptr(),
                );

                match result {
                    ash::vk::Result::SUCCESS => {
                        // SAFETY: on SUCCESS the driver wrote exactly `count`
                        // elements into the buffer.
                        handles.set_len(count as usize);
                        break handles;
                    }
                    ash::vk::Result::INCOMPLETE => (),
                    err => return Err(VulkanError::from(err)),
                }
            };

            let physical_devices: SmallVec<[_; 4]> = handles
                .into_iter()
                .map(|handle| PhysicalDevice::from_handle(self.clone(), handle))
                .collect::<Result<_, _>>()?;

            Ok(physical_devices.into_iter())
        }
    }
}
impl Drop for Instance {
    #[inline]
    fn drop(&mut self) {
        let fns = self.fns();

        unsafe {
            // SAFETY: `handle` was created by this `Instance` and is owned
            // exclusively by it; no allocation callbacks were passed at
            // creation, so `ptr::null()` is passed for them here as well.
            (fns.v1_0.destroy_instance)(self.handle, ptr::null());
        }
    }
}
// SAFETY: `handle()` returns the raw handle that this `Instance` owns and
// keeps alive for its whole lifetime.
unsafe impl VulkanObject for Instance {
    type Handle = ash::vk::Instance;

    /// Returns the raw Vulkan instance handle.
    #[inline]
    fn handle(&self) -> Self::Handle {
        self.handle
    }
}
crate::impl_id_counter!(Instance);
impl Debug for Instance {
    /// Renders every field except the internal id counter and the stored
    /// user callbacks; `finish_non_exhaustive` marks the omission with `..`.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        f.debug_struct("Instance")
            .field("handle", &self.handle)
            .field("fns", &self.fns)
            .field("api_version", &self.api_version)
            .field("enabled_extensions", &self.enabled_extensions)
            .field("enabled_layers", &self.enabled_layers)
            // Historical label: the `library` field is printed under the
            // name "function_pointers".
            .field("function_pointers", &self.library)
            .field("max_api_version", &self.max_api_version)
            .finish_non_exhaustive()
    }
}
/// Parameters to create a new `Instance`.
#[derive(Debug)]
pub struct InstanceCreateInfo {
    /// A string of your choice stating the name of your application.
    ///
    /// The default value is `None`.
    pub application_name: Option<String>,

    /// A version number of your choice specifying the version of your application.
    ///
    /// The default value is zero.
    pub application_version: Version,

    /// The extensions to enable on the instance.
    ///
    /// The default value is [`InstanceExtensions::empty()`].
    pub enabled_extensions: InstanceExtensions,

    /// The layers to enable on the instance.
    ///
    /// The default value is empty.
    pub enabled_layers: Vec<String>,

    /// A string of your choice stating the name of the engine used to power the application.
    ///
    /// The default value is `None`.
    pub engine_name: Option<String>,

    /// A version number of your choice specifying the version of the engine used to power the
    /// application.
    ///
    /// The default value is zero.
    pub engine_version: Version,

    /// The highest Vulkan API version that the application will use with the instance.
    ///
    /// Usually, you will want to leave this at the default.
    ///
    /// The default value is [`Version::HEADER_VERSION`], but if the
    /// supported instance version is 1.0, then it will be 1.0.
    pub max_api_version: Option<Version>,

    /// Include [portability subset](crate::instance#portability-subset-devices-and-the-enumerate_portability-flag)
    /// devices when enumerating physical devices.
    ///
    /// If you enable this flag, you must ensure that your program is prepared to handle the
    /// non-conformant aspects of these devices.
    ///
    /// If this flag is not enabled, and there are no fully-conformant devices on the system, then
    /// [`Instance::new`] will return an `IncompatibleDriver` error.
    ///
    /// The default value is `false`.
    ///
    /// # Notes
    ///
    /// If this flag is enabled, and the
    /// [`khr_portability_enumeration`](crate::instance::InstanceExtensions::khr_portability_enumeration)
    /// extension is supported, it will be enabled automatically when creating the instance.
    /// If the extension is not supported, this flag will be ignored.
    pub enumerate_portability: bool,

    /// Features of the validation layer to enable.
    ///
    /// If not empty, the
    /// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
    /// extension must be enabled on the instance.
    pub enabled_validation_features: Vec<ValidationFeatureEnable>,

    /// Features of the validation layer to disable.
    ///
    /// If not empty, the
    /// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
    /// extension must be enabled on the instance.
    pub disabled_validation_features: Vec<ValidationFeatureDisable>,

    // Non-exhaustiveness marker: lets fields be added later without breaking
    // downstream struct literals (they must use `..Default::default()`).
    pub _ne: crate::NonExhaustive,
}
impl Default for InstanceCreateInfo {
    /// Returns a create-info with no application or engine metadata, no
    /// extensions, layers or validation-feature overrides, portability
    /// enumeration disabled, and the maximum API version left unset (it is
    /// then resolved when the instance is created).
    #[inline]
    fn default() -> Self {
        Self {
            application_name: None,
            application_version: Version::major_minor(0, 0),
            engine_name: None,
            engine_version: Version::major_minor(0, 0),
            enabled_extensions: InstanceExtensions::empty(),
            enabled_layers: Vec::new(),
            max_api_version: None,
            enumerate_portability: false,
            enabled_validation_features: Vec::new(),
            disabled_validation_features: Vec::new(),
            _ne: crate::NonExhaustive(()),
        }
    }
}
impl InstanceCreateInfo {
    /// Returns an `InstanceCreateInfo` with the `application_name` and `application_version` set
    /// from information in your crate's Cargo.toml file.
    ///
    /// # Panics
    ///
    /// - Panics if the required environment variables are missing, which happens if the project
    ///   wasn't built by Cargo.
    #[inline]
    pub fn application_from_cargo_toml() -> Self {
        // NOTE(review): `env!` expands at the site where it is written, i.e.
        // when *this* crate is compiled. Verify that these CARGO_PKG_* values
        // really reflect the downstream application's package metadata rather
        // than this library's own.
        Self {
            application_name: Some(env!("CARGO_PKG_NAME").to_owned()),
            application_version: Version {
                major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
                minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
                patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
            },
            ..Default::default()
        }
    }
}
/// Error that can happen when creating an instance.
#[derive(Clone, Debug)]
pub enum InstanceCreationError {
    /// Not enough memory.
    OomError(OomError),

    /// Failed to initialize for an implementation-specific reason.
    InitializationFailed,

    /// One of the requested layers is missing.
    LayerNotPresent,

    /// One of the requested extensions is not supported by the implementation.
    ExtensionNotPresent,

    /// The version requested is not supported by the implementation.
    IncompatibleDriver,

    /// A restriction for an extension was not met.
    ExtensionRestrictionNotMet(ExtensionRestrictionError),

    /// A requirement of the create-info was not met: `required_for` names the
    /// offending setting, and `requires_one_of` lists what would satisfy it.
    RequirementNotMet {
        required_for: &'static str,
        requires_one_of: RequiresOneOf,
    },
}
impl Error for InstanceCreationError {
    /// Only the out-of-memory variant wraps an underlying error; every other
    /// variant is a leaf with no source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        if let Self::OomError(err) = self {
            Some(err)
        } else {
            None
        }
    }
}
impl Display for InstanceCreationError {
    /// Formats a human-readable description of the error. Variants that carry
    /// data delegate or format themselves and return early; the fixed-message
    /// variants share a single `write_str` at the end.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        let msg = match self {
            Self::ExtensionRestrictionNotMet(err) => return Display::fmt(err, f),
            Self::RequirementNotMet {
                required_for,
                requires_one_of,
            } => {
                return write!(
                    f,
                    "a requirement was not met for: {}; requires one of: {}",
                    required_for, requires_one_of,
                )
            }
            Self::OomError(_) => "not enough memory available",
            Self::InitializationFailed => "initialization failed",
            Self::LayerNotPresent => "layer not present",
            Self::ExtensionNotPresent => "extension not present",
            Self::IncompatibleDriver => "incompatible driver",
        };
        f.write_str(msg)
    }
}
impl From<OomError> for InstanceCreationError {
fn | (err: OomError) -> Self {
Self::OomError(err)
}
}
impl From<ExtensionRestrictionError> for InstanceCreationError {
    // Wraps an extension-restriction failure into the creation-error enum,
    // enabling `?` on `check_requirements` during instance creation.
    fn from(err: ExtensionRestrictionError) -> Self {
        Self::ExtensionRestrictionNotMet(err)
    }
}
impl From<VulkanError> for InstanceCreationError {
    /// Maps the raw Vulkan error codes that instance creation can produce
    /// onto this enum. Both out-of-memory flavors funnel into `OomError`;
    /// any other code is unexpected here and triggers a panic.
    fn from(err: VulkanError) -> Self {
        match err {
            err @ (VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory) => {
                Self::OomError(OomError::from(err))
            }
            VulkanError::InitializationFailed => Self::InitializationFailed,
            VulkanError::LayerNotPresent => Self::LayerNotPresent,
            VulkanError::ExtensionNotPresent => Self::ExtensionNotPresent,
            VulkanError::IncompatibleDriver => Self::IncompatibleDriver,
            _ => panic!("unexpected error: {:?}", err),
        }
    }
}
#[cfg(test)]
mod tests {
    #[test]
    fn create_instance() {
        // Smoke test: only checks that instance creation does not panic.
        // NOTE(review): `instance!` is a test macro defined elsewhere in the
        // crate — confirm how it behaves when no Vulkan driver is installed.
        let _ = instance!();
    }
}
| from | identifier_name |
mod.rs | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! API entry point.
//!
//! The first thing to do after loading the Vulkan library is to create an `Instance` object.
//!
//! For example:
//!
//! ```no_run
//! use vulkano::{
//! instance::{Instance, InstanceExtensions},
//! Version, VulkanLibrary,
//! };
//!
//! let library = VulkanLibrary::new()
//! .unwrap_or_else(|err| panic!("Couldn't load Vulkan library: {:?}", err));
//! let instance = Instance::new(library, Default::default())
//! .unwrap_or_else(|err| panic!("Couldn't create instance: {:?}", err));
//! ```
//!
//! Creating an instance initializes everything and allows you to enumerate physical devices,
//! i.e. all the Vulkan implementations that are available on the system.
//!
//! ```no_run
//! # use vulkano::{
//! # instance::{Instance, InstanceExtensions},
//! # Version, VulkanLibrary,
//! # };
//! use vulkano::device::physical::PhysicalDevice;
//!
//! # let library = VulkanLibrary::new().unwrap();
//! # let instance = Instance::new(library, Default::default()).unwrap();
//! for physical_device in instance.enumerate_physical_devices().unwrap() {
//! println!("Available device: {}", physical_device.properties().device_name);
//! }
//! ```
//!
//! # Enumerating physical devices and creating a device
//!
//! After you have created an instance, the next step is usually to enumerate the physical devices
//! that are available on the system with `Instance::enumerate_physical_devices()` (see above).
//!
//! When choosing which physical device to use, keep in mind that physical devices may or may not
//! be able to draw to a certain surface (ie. to a window or a monitor), or may even not be able
//! to draw at all. See the `swapchain` module for more information about surfaces.
//!
//! Once you have chosen a physical device, you can create a `Device` object from it. See the
//! `device` module for more info.
//!
//! # Portability subset devices and the `enumerate_portability` flag
//!
//! Certain devices, currently those on MacOS and iOS systems, do not fully conform to the Vulkan
//! specification. They are usable as normal devices, but they do not implement everything that
//! is required; some mandatory parts of Vulkan are missing. These are known as
//! "portability subset" devices.
//!
//! A portability subset device will advertise support for the
//! [`khr_portability_subset`](crate::device::DeviceExtensions::khr_portability_subset) device
//! extension. This extension must always be enabled when it is supported, and Vulkano will
//! automatically enable it when creating the device. When it is enabled, some parts of Vulkan that
//! are available in standard Vulkan will not be available by default, but they can be used by
//! enabling corresponding features when creating the device, if the device supports them.
//!
//! Because these devices are non-conformant, Vulkan programs that rely on full compliance may
//! not work (crash or have validation errors) when run on them, if they happen to use a part of
//! Vulkan that is missing from the non-conformant device. Therefore, Vulkan hides them from
//! the user by default when calling `enumerate_physical_devices` on the instance. If there are no
//! conformant devices on the system, `Instance::new` will return an `IncompatibleDriver` error.
//!
//! In order to enumerate portability subset devices, you must set the
//! [`InstanceCreateInfo::enumerate_portability`] flag when creating the instance. However, if you
//! do this, your program must be prepared to handle the non-conformant aspects of these devices,
//! and must enable the appropriate features when creating the `Device` if you intend to use them.
use self::debug::{
DebugUtilsMessengerCreateInfo, UserCallback, ValidationFeatureDisable, ValidationFeatureEnable,
};
pub use self::{extensions::InstanceExtensions, layers::LayerProperties};
use crate::{
device::physical::PhysicalDevice, instance::debug::trampoline, OomError, RequiresOneOf,
VulkanError, VulkanLibrary, VulkanObject,
};
pub use crate::{
extensions::{ExtensionRestriction, ExtensionRestrictionError},
fns::InstanceFunctions,
version::Version,
};
use smallvec::SmallVec;
use std::{
error::Error,
ffi::{c_void, CString},
fmt::{Debug, Display, Error as FmtError, Formatter},
mem::MaybeUninit,
num::NonZeroU64,
panic::{RefUnwindSafe, UnwindSafe},
ptr,
sync::Arc,
};
pub mod debug;
pub(crate) mod extensions;
mod layers;
/// An instance of a Vulkan context. This is the main object that should be created by an
/// application before everything else.
///
/// # Application and engine info
///
/// When you create an instance, you have the possibility to set information about your application
/// and its engine.
///
/// Providing this information allows for example the driver to let the user configure the driver's
/// behavior for your application alone through a control panel.
///
/// ```no_run
/// # #[macro_use] extern crate vulkano;
/// # fn main() {
/// use vulkano::{
/// instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// Version, VulkanLibrary,
/// };
///
/// let library = VulkanLibrary::new().unwrap();
/// let _instance = Instance::new(
/// library,
/// InstanceCreateInfo::application_from_cargo_toml(),
/// ).unwrap();
/// # }
/// ```
///
/// # API versions
///
/// Both an `Instance` and a [`Device`](crate::device::Device) have a highest version of the Vulkan
/// API that they support. This places a limit on what Vulkan functions and features are available
/// to use when used on a particular instance or device. It is possible for the instance and the
/// device to support different versions. The supported version for an instance can be queried
/// before creation with
/// [`VulkanLibrary::api_version`](crate::VulkanLibrary::api_version),
/// while for a device it can be retrieved with
/// [`PhysicalDevice::api_version`](crate::device::physical::PhysicalDevice::api_version).
///
/// When creating an `Instance`, you have to specify a maximum API version that you will use.
/// This restricts the API version that is available for the instance and any devices created from
/// it. For example, if both instance and device potentially support Vulkan 1.2, but you specify
/// 1.1 as the maximum API version when creating the `Instance`, then you can only use Vulkan 1.1
/// functions, even though they could theoretically support a higher version. You can think of it
/// as a promise never to use any functionality from a higher version.
///
/// The maximum API version is not a _minimum_, so it is possible to set it to a higher version than
/// what the instance or device inherently support. The final API version that you are able to use
/// on an instance or device is the lower of the supported API version and the chosen maximum API
/// version of the `Instance`.
///
/// Due to a quirk in how the Vulkan 1.0 specification was written, if the instance only
/// supports Vulkan 1.0, then it is not possible to specify a maximum API version higher than 1.0.
/// Trying to create an `Instance` will return an `IncompatibleDriver` error. Consequently, it is
/// not possible to use a higher device API version with an instance that only supports 1.0.
///
/// # Extensions
///
/// When creating an `Instance`, you must provide a list of extensions that must be enabled on the
/// newly-created instance. Trying to enable an extension that is not supported by the system will
/// result in an error.
///
/// Contrary to OpenGL, it is not possible to use the features of an extension if it was not
/// explicitly enabled.
///
/// Extensions are especially important to take into account if you want to render images on the
/// screen, as the only way to do so is to use the `VK_KHR_surface` extension. More information
/// about this in the `swapchain` module.
///
/// For example, here is how we create an instance with the `VK_KHR_surface` and
/// `VK_KHR_android_surface` extensions enabled, which will allow us to render images to an
/// Android screen. You can compile and run this code on any system, but it is highly unlikely to
/// succeed on anything else than an Android-running device.
///
/// ```no_run
/// use vulkano::{
/// instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// Version, VulkanLibrary,
/// };
///
/// let library = VulkanLibrary::new()
/// .unwrap_or_else(|err| panic!("Couldn't load Vulkan library: {:?}", err));
///
/// let extensions = InstanceExtensions {
/// khr_surface: true,
/// khr_android_surface: true,
/// .. InstanceExtensions::empty()
/// };
///
/// let instance = Instance::new(
/// library,
/// InstanceCreateInfo {
/// enabled_extensions: extensions,
/// ..Default::default()
/// },
/// )
/// .unwrap_or_else(|err| panic!("Couldn't create instance: {:?}", err));
/// ```
///
/// # Layers
///
/// When creating an `Instance`, you have the possibility to pass a list of **layers** that will
/// be activated on the newly-created instance. The list of available layers can be retrieved by
/// calling the [`layer_properties`](crate::VulkanLibrary::layer_properties) method of
/// `VulkanLibrary`.
///
/// A layer is a component that will hook and potentially modify the Vulkan function calls.
/// For example, activating a layer could add a frames-per-second counter on the screen, or it
/// could send information to a debugger that will debug your application.
///
/// > **Note**: From an application's point of view, layers "just exist". In practice, on Windows
/// > and Linux, layers can be installed by third party installers or by package managers and can
/// > also be activated by setting the value of the `VK_INSTANCE_LAYERS` environment variable
/// > before starting the program. See the documentation of the official Vulkan loader for these
/// > platforms.
///
/// > **Note**: In practice, the most common use of layers right now is for debugging purposes.
/// > To do so, you are encouraged to set the `VK_INSTANCE_LAYERS` environment variable on Windows
/// > or Linux instead of modifying the source code of your program. For example:
/// > `export VK_INSTANCE_LAYERS=VK_LAYER_LUNARG_api_dump` on Linux if you installed the Vulkan SDK
/// > will print the list of raw Vulkan function calls.
///
/// ## Examples
///
/// ```
/// # use std::{sync::Arc, error::Error};
/// # use vulkano::{
/// # instance::{Instance, InstanceCreateInfo, InstanceExtensions},
/// # Version, VulkanLibrary,
/// # };
/// # fn test() -> Result<Arc<Instance>, Box<dyn Error>> {
/// let library = VulkanLibrary::new()?;
///
/// // For the sake of the example, we activate all the layers that
/// // contain the word "foo" in their description.
/// let layers: Vec<_> = library.layer_properties()?
/// .filter(|l| l.description().contains("foo"))
/// .collect();
///
/// let instance = Instance::new(
/// library,
/// InstanceCreateInfo {
/// enabled_layers: layers.iter().map(|l| l.name().to_owned()).collect(),
/// ..Default::default()
/// },
/// )?;
/// # Ok(instance)
/// # }
/// ```
// TODO: mention that extensions must be supported by layers as well
pub struct Instance {
handle: ash::vk::Instance,
fns: InstanceFunctions,
id: NonZeroU64,
api_version: Version,
enabled_extensions: InstanceExtensions,
enabled_layers: Vec<String>,
library: Arc<VulkanLibrary>,
max_api_version: Version,
_user_callbacks: Vec<Box<UserCallback>>,
}
// TODO: fix the underlying cause instead
impl UnwindSafe for Instance {}
impl RefUnwindSafe for Instance {}
impl Instance {
/// Creates a new `Instance`.
///
/// # Panics
///
/// - Panics if any version numbers in `create_info` contain a field too large to be converted
/// into a Vulkan version number.
/// - Panics if `create_info.max_api_version` is not at least `V1_0`.
pub fn new(
library: Arc<VulkanLibrary>,
create_info: InstanceCreateInfo,
) -> Result<Arc<Instance>, InstanceCreationError> {
unsafe { Self::with_debug_utils_messengers(library, create_info, []) }
}
/// Creates a new `Instance` with debug messengers to use during the creation and destruction
/// of the instance.
///
/// The debug messengers are not used at any other time,
/// [`DebugUtilsMessenger`](crate::instance::debug::DebugUtilsMessenger) should be used for
/// that.
///
/// If `debug_utils_messengers` is not empty, the `ext_debug_utils` extension must be set in
/// `enabled_extensions`.
///
/// # Panics
///
/// - Panics if the `message_severity` or `message_type` members of any element of
/// `debug_utils_messengers` are empty.
///
/// # Safety
///
/// - The `user_callback` of each element of `debug_utils_messengers` must not make any calls
/// to the Vulkan API.
pub unsafe fn with_debug_utils_messengers(
library: Arc<VulkanLibrary>,
create_info: InstanceCreateInfo,
debug_utils_messengers: impl IntoIterator<Item = DebugUtilsMessengerCreateInfo>,
) -> Result<Arc<Instance>, InstanceCreationError> {
let InstanceCreateInfo {
application_name,
application_version,
mut enabled_extensions,
enabled_layers,
engine_name,
engine_version,
max_api_version,
enumerate_portability,
enabled_validation_features,
disabled_validation_features,
_ne: _,
} = create_info;
let (api_version, max_api_version) = {
let api_version = library.api_version();
let max_api_version = if let Some(max_api_version) = max_api_version {
max_api_version
} else if api_version < Version::V1_1 {
api_version
} else {
Version::HEADER_VERSION
};
(std::cmp::min(max_api_version, api_version), max_api_version)
};
// VUID-VkApplicationInfo-apiVersion-04010
assert!(max_api_version >= Version::V1_0);
let supported_extensions =
library.supported_extensions_with_layers(enabled_layers.iter().map(String::as_str))?;
let mut flags = ash::vk::InstanceCreateFlags::empty();
if enumerate_portability && supported_extensions.khr_portability_enumeration {
enabled_extensions.khr_portability_enumeration = true;
flags |= ash::vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR;
}
// Check if the extensions are correct
enabled_extensions.check_requirements(&supported_extensions, api_version)?;
// FIXME: check whether each layer is supported
let enabled_layers_cstr: Vec<CString> = enabled_layers
.iter()
.map(|name| CString::new(name.clone()).unwrap())
.collect();
let enabled_layers_ptrs = enabled_layers_cstr
.iter()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let enabled_extensions_cstr: Vec<CString> = (&enabled_extensions).into();
let enabled_extensions_ptrs = enabled_extensions_cstr
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let application_name_cstr = application_name.map(|name| CString::new(name).unwrap());
let engine_name_cstr = engine_name.map(|name| CString::new(name).unwrap());
let application_info = ash::vk::ApplicationInfo {
p_application_name: application_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
application_version: application_version
.try_into()
.expect("Version out of range"),
p_engine_name: engine_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
engine_version: engine_version.try_into().expect("Version out of range"),
api_version: max_api_version.try_into().expect("Version out of range"),
..Default::default()
};
let enable_validation_features_vk: SmallVec<[_; 5]> = enabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let disable_validation_features_vk: SmallVec<[_; 8]> = disabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let mut create_info_vk = ash::vk::InstanceCreateInfo {
flags,
p_application_info: &application_info,
enabled_layer_count: enabled_layers_ptrs.len() as u32,
pp_enabled_layer_names: enabled_layers_ptrs.as_ptr(),
enabled_extension_count: enabled_extensions_ptrs.len() as u32,
pp_enabled_extension_names: enabled_extensions_ptrs.as_ptr(),
..Default::default()
};
let mut validation_features_vk = None;
if !enabled_validation_features.is_empty() || !disabled_validation_features.is_empty() {
if !enabled_extensions.ext_validation_features {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.enabled_validation_features` or \
`create_info.disabled_validation_features` are not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_validation_features"],
..Default::default()
},
});
}
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02967
assert!(
!enabled_validation_features
.contains(&ValidationFeatureEnable::GpuAssistedReserveBindingSlot)
|| enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted)
);
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02968
assert!(
!(enabled_validation_features.contains(&ValidationFeatureEnable::DebugPrintf)
&& enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted))
);
let next = validation_features_vk.insert(ash::vk::ValidationFeaturesEXT {
enabled_validation_feature_count: enable_validation_features_vk.len() as u32,
p_enabled_validation_features: enable_validation_features_vk.as_ptr(),
disabled_validation_feature_count: disable_validation_features_vk.len() as u32,
p_disabled_validation_features: disable_validation_features_vk.as_ptr(),
..Default::default()
});
next.p_next = create_info_vk.p_next;
create_info_vk.p_next = next as *const _ as *const _;
}
// Handle debug messengers
let debug_utils_messengers = debug_utils_messengers.into_iter();
let mut debug_utils_messenger_create_infos =
Vec::with_capacity(debug_utils_messengers.size_hint().0);
let mut user_callbacks = Vec::with_capacity(debug_utils_messengers.size_hint().0);
for create_info in debug_utils_messengers {
let DebugUtilsMessengerCreateInfo {
message_type,
message_severity,
user_callback,
_ne: _,
} = create_info;
// VUID-VkInstanceCreateInfo-pNext-04926
if !enabled_extensions.ext_debug_utils {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.debug_utils_messengers` is not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_debug_utils"],
..Default::default()
},
});
}
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-parameter
// TODO: message_severity.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-requiredbitmask
assert!(!message_severity.is_empty());
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-parameter
// TODO: message_type.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-requiredbitmask
assert!(!message_type.is_empty());
// VUID-PFN_vkDebugUtilsMessengerCallbackEXT-None-04769
// Can't be checked, creation is unsafe.
let user_callback = Box::new(user_callback);
let create_info = ash::vk::DebugUtilsMessengerCreateInfoEXT {
flags: ash::vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: message_severity.into(),
message_type: message_type.into(),
pfn_user_callback: Some(trampoline),
p_user_data: &*user_callback as &Arc<_> as *const Arc<_> as *const c_void as *mut _,
..Default::default()
};
debug_utils_messenger_create_infos.push(create_info);
user_callbacks.push(user_callback);
}
for i in 1..debug_utils_messenger_create_infos.len() {
debug_utils_messenger_create_infos[i - 1].p_next =
&debug_utils_messenger_create_infos[i] as *const _ as *const _;
}
if let Some(info) = debug_utils_messenger_create_infos.first() {
create_info_vk.p_next = info as *const _ as *const _;
}
// Creating the Vulkan instance.
let handle = {
let mut output = MaybeUninit::uninit();
let fns = library.fns();
(fns.v1_0.create_instance)(&create_info_vk, ptr::null(), output.as_mut_ptr())
.result()
.map_err(VulkanError::from)?;
output.assume_init()
};
// Loading the function pointers of the newly-created instance.
let fns = {
InstanceFunctions::load(|name| {
library
.get_instance_proc_addr(handle, name.as_ptr())
.map_or(ptr::null(), |func| func as _)
})
};
Ok(Arc::new(Instance {
handle,
fns,
id: Self::next_id(),
api_version,
enabled_extensions,
enabled_layers,
library,
max_api_version,
_user_callbacks: user_callbacks,
}))
}
/// Returns the Vulkan library used to create this instance.
#[inline]
pub fn library(&self) -> &Arc<VulkanLibrary> {
&self.library
}
/// Returns the Vulkan version supported by the instance.
///
/// This is the lower of the
/// [driver's supported version](crate::VulkanLibrary::api_version) and
/// [`max_api_version`](Instance::max_api_version).
#[inline]
pub fn api_version(&self) -> Version {
self.api_version
}
/// Returns the maximum Vulkan version that was specified when creating the instance.
#[inline]
pub fn max_api_version(&self) -> Version {
self.max_api_version
}
/// Returns pointers to the raw Vulkan functions of the instance.
#[inline]
pub fn fns(&self) -> &InstanceFunctions {
&self.fns
}
/// Returns the extensions that have been enabled on the instance.
#[inline]
pub fn enabled_extensions(&self) -> &InstanceExtensions {
&self.enabled_extensions
}
/// Returns the layers that have been enabled on the instance.
#[inline]
pub fn enabled_layers(&self) -> &[String] |
/// Returns an iterator that enumerates the physical devices available.
///
/// # Examples
///
/// ```no_run
/// # use vulkano::{
/// # instance::{Instance, InstanceExtensions},
/// # Version, VulkanLibrary,
/// # };
///
/// # let library = VulkanLibrary::new().unwrap();
/// # let instance = Instance::new(library, Default::default()).unwrap();
/// for physical_device in instance.enumerate_physical_devices().unwrap() {
/// println!("Available device: {}", physical_device.properties().device_name);
/// }
/// ```
pub fn enumerate_physical_devices(
self: &Arc<Self>,
) -> Result<impl ExactSizeIterator<Item = Arc<PhysicalDevice>>, VulkanError> {
let fns = self.fns();
unsafe {
let handles = loop {
let mut count = 0;
(fns.v1_0.enumerate_physical_devices)(self.handle, &mut count, ptr::null_mut())
.result()
.map_err(VulkanError::from)?;
let mut handles = Vec::with_capacity(count as usize);
let result = (fns.v1_0.enumerate_physical_devices)(
self.handle,
&mut count,
handles.as_mut_ptr(),
);
match result {
ash::vk::Result::SUCCESS => {
handles.set_len(count as usize);
break handles;
}
ash::vk::Result::INCOMPLETE => (),
err => return Err(VulkanError::from(err)),
}
};
let physical_devices: SmallVec<[_; 4]> = handles
.into_iter()
.map(|handle| PhysicalDevice::from_handle(self.clone(), handle))
.collect::<Result<_, _>>()?;
Ok(physical_devices.into_iter())
}
}
}
impl Drop for Instance {
#[inline]
fn drop(&mut self) {
let fns = self.fns();
unsafe {
(fns.v1_0.destroy_instance)(self.handle, ptr::null());
}
}
}
unsafe impl VulkanObject for Instance {
type Handle = ash::vk::Instance;
#[inline]
fn handle(&self) -> Self::Handle {
self.handle
}
}
crate::impl_id_counter!(Instance);
impl Debug for Instance {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
let Self {
handle,
fns,
id: _,
api_version,
enabled_extensions,
enabled_layers,
library: function_pointers,
max_api_version,
_user_callbacks: _,
} = self;
f.debug_struct("Instance")
.field("handle", handle)
.field("fns", fns)
.field("api_version", api_version)
.field("enabled_extensions", enabled_extensions)
.field("enabled_layers", enabled_layers)
.field("function_pointers", function_pointers)
.field("max_api_version", max_api_version)
.finish_non_exhaustive()
}
}
/// Parameters to create a new `Instance`.
#[derive(Debug)]
pub struct InstanceCreateInfo {
/// A string of your choice stating the name of your application.
///
/// The default value is `None`.
pub application_name: Option<String>,
/// A version number of your choice specifying the version of your application.
///
/// The default value is zero.
pub application_version: Version,
/// The extensions to enable on the instance.
///
/// The default value is [`InstanceExtensions::empty()`].
pub enabled_extensions: InstanceExtensions,
/// The layers to enable on the instance.
///
/// The default value is empty.
pub enabled_layers: Vec<String>,
/// A string of your choice stating the name of the engine used to power the application.
pub engine_name: Option<String>,
/// A version number of your choice specifying the version of the engine used to power the
/// application.
///
/// The default value is zero.
pub engine_version: Version,
/// The highest Vulkan API version that the application will use with the instance.
///
/// Usually, you will want to leave this at the default.
///
/// The default value is [`Version::HEADER_VERSION`], but if the
/// supported instance version is 1.0, then it will be 1.0.
pub max_api_version: Option<Version>,
/// Include [portability subset](crate::instance#portability-subset-devices-and-the-enumerate_portability-flag)
/// devices when enumerating physical devices.
///
/// If you enable this flag, you must ensure that your program is prepared to handle the
/// non-conformant aspects of these devices.
///
/// If this flag is not enabled, and there are no fully-conformant devices on the system, then
/// [`Instance::new`] will return an `IncompatibleDriver` error.
///
/// The default value is `false`.
///
/// # Notes
///
/// If this flag is enabled, and the
/// [`khr_portability_enumeration`](crate::instance::InstanceExtensions::khr_portability_enumeration)
/// extension is supported, it will be enabled automatically when creating the instance.
/// If the extension is not supported, this flag will be ignored.
pub enumerate_portability: bool,
/// Features of the validation layer to enable.
///
/// If not empty, the
/// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
/// extension must be enabled on the instance.
pub enabled_validation_features: Vec<ValidationFeatureEnable>,
/// Features of the validation layer to disable.
///
/// If not empty, the
/// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
/// extension must be enabled on the instance.
pub disabled_validation_features: Vec<ValidationFeatureDisable>,
pub _ne: crate::NonExhaustive,
}
impl Default for InstanceCreateInfo {
#[inline]
fn default() -> Self {
Self {
application_name: None,
application_version: Version::major_minor(0, 0),
enabled_extensions: InstanceExtensions::empty(),
enabled_layers: Vec::new(),
engine_name: None,
engine_version: Version::major_minor(0, 0),
max_api_version: None,
enumerate_portability: false,
enabled_validation_features: Vec::new(),
disabled_validation_features: Vec::new(),
_ne: crate::NonExhaustive(()),
}
}
}
impl InstanceCreateInfo {
/// Returns an `InstanceCreateInfo` with the `application_name` and `application_version` set
/// from information in your crate's Cargo.toml file.
///
/// # Panics
///
/// - Panics if the required environment variables are missing, which happens if the project
/// wasn't built by Cargo.
#[inline]
pub fn application_from_cargo_toml() -> Self {
Self {
application_name: Some(env!("CARGO_PKG_NAME").to_owned()),
application_version: Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
},
..Default::default()
}
}
}
/// Error that can happen when creating an instance.
#[derive(Clone, Debug)]
pub enum InstanceCreationError {
/// Not enough memory.
OomError(OomError),
/// Failed to initialize for an implementation-specific reason.
InitializationFailed,
/// One of the requested layers is missing.
LayerNotPresent,
/// One of the requested extensions is not supported by the implementation.
ExtensionNotPresent,
/// The version requested is not supported by the implementation.
IncompatibleDriver,
/// A restriction for an extension was not met.
ExtensionRestrictionNotMet(ExtensionRestrictionError),
RequirementNotMet {
required_for: &'static str,
requires_one_of: RequiresOneOf,
},
}
impl Error for InstanceCreationError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::OomError(err) => Some(err),
_ => None,
}
}
}
impl Display for InstanceCreationError {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
match self {
Self::OomError(_) => write!(f, "not enough memory available"),
Self::InitializationFailed => write!(f, "initialization failed"),
Self::LayerNotPresent => write!(f, "layer not present"),
Self::ExtensionNotPresent => write!(f, "extension not present"),
Self::IncompatibleDriver => write!(f, "incompatible driver"),
Self::ExtensionRestrictionNotMet(err) => Display::fmt(err, f),
Self::RequirementNotMet {
required_for,
requires_one_of,
} => write!(
f,
"a requirement was not met for: {}; requires one of: {}",
required_for, requires_one_of,
),
}
}
}
impl From<OomError> for InstanceCreationError {
fn from(err: OomError) -> Self {
Self::OomError(err)
}
}
impl From<ExtensionRestrictionError> for InstanceCreationError {
fn from(err: ExtensionRestrictionError) -> Self {
Self::ExtensionRestrictionNotMet(err)
}
}
impl From<VulkanError> for InstanceCreationError {
fn from(err: VulkanError) -> Self {
match err {
err @ VulkanError::OutOfHostMemory => Self::OomError(OomError::from(err)),
err @ VulkanError::OutOfDeviceMemory => Self::OomError(OomError::from(err)),
VulkanError::InitializationFailed => Self::InitializationFailed,
VulkanError::LayerNotPresent => Self::LayerNotPresent,
VulkanError::ExtensionNotPresent => Self::ExtensionNotPresent,
VulkanError::IncompatibleDriver => Self::IncompatibleDriver,
_ => panic!("unexpected error: {:?}", err),
}
}
}
#[cfg(test)]
mod tests {
#[test]
fn create_instance() {
let _ = instance!();
}
}
| {
&self.enabled_layers
} | identifier_body |
copy_check.py | #!/opt/bin/python2.7
'''
'''
import re
import time
import sys
import os
import tree
import shutil
import getpass
import pickle
from progressbar import ProgressBar,Percentage,Bar
import glob
import argparse
backupList=[] #Append list to execute backup
copiedAtThisRound=[] #Pickle dump list for the round of back up
initialList=[]
backUpTo=os.path.abspath('/volume1/CCNC_MRI') #Back up to
backUpFrom = os.path.abspath('/volumeUSB2/usbshare') #Find subj to back up from
if os.path.isfile(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt")):
f = open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'r')
alreadyCopied=pickle.load(f)
f.close()
print 'loaded log successfully'
print alreadyCopied
else:
alreadyCopied=[]
def main():
'''
test whether the source directory is a folder
'''
#directory = raw_input('\nwhere is the files ?:') ---> for different sources
if os.path.exists(backUpTo) & os.path.exists(backUpFrom) == True:
os.system('clear')
backUpConfirm(backUpFrom)
if copiedAtThisRound!=[]:
executeBackUp(backupList,backUpFrom)
def backUpConfirm(backUpFrom):
'''
show the list of folders under the backUpFrom
if it is confirmed by the user
excute backup
'''
dirList = [o for o in os.listdir(backUpFrom) if os.path.isdir(os.path.join(backUpFrom,o))]
#removing subjects already copied according to the log file
for copied in alreadyCopied:
try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response):
alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp)
def | (subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0].upper()
fullname=fullname + i[1:]
subjInitial=subjInitial+i[0][0]
print subjInitial
return subjInitial.upper(),fullname,subjNum
def maxGroupNum(targetDir):
conpro=''
maxNumPattern=re.compile('\d+')
mx = 0
for string in maxNumPattern.findall(' '.join(os.listdir(targetDir))):
if int(string) > mx:
mx = int(string)
highest = mx +1
if highest<10:
highest ='0'+str(highest)
else:
highest = str(highest)
return conpro+highest
def executeBackUp(backupList,backUpFrom):
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(backupList)).start()
num=0
maxNum=len(backupList)
perOne=1/(maxNum*3)
for i in backupList:
shutil.copytree(i[0],i[1])
pbar.update(num+perOne)
log(i[0],i[1],i[2],i[3],i[4],i[5],i[7])
post_check(backUpFrom)
pbar.update(num+perOne)
time.sleep(0.01)
pbar.update(num+perOne)
os.system('rm {0}/.tmp*'.format(i[6]))
pbar.finish()
def log(source,destination,fullname,subjNum,groupName,note,birthday):
try:
timeInfo = time.gmtime(os.stat(source).st_mtime)
prodT=str(timeInfo.tm_year)+'_'+str(timeInfo.tm_mon)+'_'+str(timeInfo.tm_mday)
prodH=str(timeInfo.tm_hour)+':'+str(timeInfo.tm_min)
user=getpass.getuser()
currentTime=time.ctime()
with open(os.path.join(destination,'log.txt'),'w') as f:
f.write('''Subject Full Name = {6}
Subject number = {7}
Group Name = {8},{9}
Source : {0}
Date of Birth : {11}
Destination : {1}
Data produced in : {2}\t{3}
Data copied at : {4}
Copied by : {5}
Note[sex/experimenter/etc]: {10}'''.format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
with open(os.path.join(backUpFrom,'log.txt'),'a') as f:
f.write('{6}\t{8}\t{9}\t{11}\t{2}\t{3}\t{0}\t{4}\t{5}\{10}'.
format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
except:
print 'log failed'
#Pickle dump the list of subjects backed up in this round
def post_check(backUpFrom):
with open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'w') as f:
currentTime=time.ctime()
pickle.dump(alreadyCopied+copiedAtThisRound,f)
if __name__=='__main__':
argparser=argparse.ArgumentParser(prog='copy_check.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
2012_12_12
Kevin Cho
from USB hard-drive
find all new subjects
copy those folders into appropriate folders
append the log file
save patient number and name information automatically
into a log.txt
''',epilog="Kevin Cho 2013_05_17")
argparser.add_argument("--copy","-c",help="copies the data",action="store_true")
argparser.add_argument("--log","-l",help="makes the log of the copied the data",action="store_true")
args=argparser.parse_args()
if args.copy:
main()
else:
main()
#main()
| getName | identifier_name |
copy_check.py | #!/opt/bin/python2.7
'''
'''
import re
import time
import sys
import os
import tree
import shutil
import getpass
import pickle
from progressbar import ProgressBar,Percentage,Bar
import glob
import argparse
backupList=[] #Append list to execute backup
copiedAtThisRound=[] #Pickle dump list for the round of back up
initialList=[]
backUpTo=os.path.abspath('/volume1/CCNC_MRI') #Back up to
backUpFrom = os.path.abspath('/volumeUSB2/usbshare') #Find subj to back up from
if os.path.isfile(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt")):
f = open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'r')
alreadyCopied=pickle.load(f)
f.close()
print 'loaded log successfully'
print alreadyCopied
else:
alreadyCopied=[]
def main():
'''
test whether the source directory is a folder
'''
#directory = raw_input('\nwhere is the files ?:') ---> for different sources
if os.path.exists(backUpTo) & os.path.exists(backUpFrom) == True:
os.system('clear')
backUpConfirm(backUpFrom)
if copiedAtThisRound!=[]:
executeBackUp(backupList,backUpFrom)
def backUpConfirm(backUpFrom):
'''
show the list of folders under the backUpFrom
if it is confirmed by the user
excute backup
'''
dirList = [o for o in os.listdir(backUpFrom) if os.path.isdir(os.path.join(backUpFrom,o))]
#removing subjects already copied according to the log file
for copied in alreadyCopied:
try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response):
alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
|
def getName(subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0].upper()
fullname=fullname + i[1:]
subjInitial=subjInitial+i[0][0]
print subjInitial
return subjInitial.upper(),fullname,subjNum
def maxGroupNum(targetDir):
conpro=''
maxNumPattern=re.compile('\d+')
mx = 0
for string in maxNumPattern.findall(' '.join(os.listdir(targetDir))):
if int(string) > mx:
mx = int(string)
highest = mx +1
if highest<10:
highest ='0'+str(highest)
else:
highest = str(highest)
return conpro+highest
def executeBackUp(backupList,backUpFrom):
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(backupList)).start()
num=0
maxNum=len(backupList)
perOne=1/(maxNum*3)
for i in backupList:
shutil.copytree(i[0],i[1])
pbar.update(num+perOne)
log(i[0],i[1],i[2],i[3],i[4],i[5],i[7])
post_check(backUpFrom)
pbar.update(num+perOne)
time.sleep(0.01)
pbar.update(num+perOne)
os.system('rm {0}/.tmp*'.format(i[6]))
pbar.finish()
def log(source,destination,fullname,subjNum,groupName,note,birthday):
try:
timeInfo = time.gmtime(os.stat(source).st_mtime)
prodT=str(timeInfo.tm_year)+'_'+str(timeInfo.tm_mon)+'_'+str(timeInfo.tm_mday)
prodH=str(timeInfo.tm_hour)+':'+str(timeInfo.tm_min)
user=getpass.getuser()
currentTime=time.ctime()
with open(os.path.join(destination,'log.txt'),'w') as f:
f.write('''Subject Full Name = {6}
Subject number = {7}
Group Name = {8},{9}
Source : {0}
Date of Birth : {11}
Destination : {1}
Data produced in : {2}\t{3}
Data copied at : {4}
Copied by : {5}
Note[sex/experimenter/etc]: {10}'''.format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
with open(os.path.join(backUpFrom,'log.txt'),'a') as f:
f.write('{6}\t{8}\t{9}\t{11}\t{2}\t{3}\t{0}\t{4}\t{5}\{10}'.
format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
except:
print 'log failed'
#Pickle dump the list of subjects backed up in this round
def post_check(backUpFrom):
with open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'w') as f:
currentTime=time.ctime()
pickle.dump(alreadyCopied+copiedAtThisRound,f)
if __name__=='__main__':
argparser=argparse.ArgumentParser(prog='copy_check.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
2012_12_12
Kevin Cho
from USB hard-drive
find all new subjects
copy those folders into appropriate folders
append the log file
save patient number and name information automatically
into a log.txt
''',epilog="Kevin Cho 2013_05_17")
argparser.add_argument("--copy","-c",help="copies the data",action="store_true")
argparser.add_argument("--log","-l",help="makes the log of the copied the data",action="store_true")
args=argparser.parse_args()
if args.copy:
main()
else:
main()
#main()
| possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp) | identifier_body |
copy_check.py | #!/opt/bin/python2.7
'''
'''
import re
import time
import sys
import os
import tree
import shutil
import getpass
import pickle
from progressbar import ProgressBar,Percentage,Bar
import glob
import argparse
backupList=[] #Append list to execute backup
copiedAtThisRound=[] #Pickle dump list for the round of back up
initialList=[]
backUpTo=os.path.abspath('/volume1/CCNC_MRI') #Back up to
backUpFrom = os.path.abspath('/volumeUSB2/usbshare') #Find subj to back up from
if os.path.isfile(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt")):
f = open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'r')
alreadyCopied=pickle.load(f)
f.close()
print 'loaded log successfully'
print alreadyCopied
else:
alreadyCopied=[]
def main():
'''
test whether the source directory is a folder
'''
#directory = raw_input('\nwhere is the files ?:') ---> for different sources
if os.path.exists(backUpTo) & os.path.exists(backUpFrom) == True:
os.system('clear')
backUpConfirm(backUpFrom)
if copiedAtThisRound!=[]:
executeBackUp(backupList,backUpFrom)
def backUpConfirm(backUpFrom):
'''
show the list of folders under the backUpFrom
if it is confirmed by the user
excute backup
'''
dirList = [o for o in os.listdir(backUpFrom) if os.path.isdir(os.path.join(backUpFrom,o))]
#removing subjects already copied according to the log file
for copied in alreadyCopied:
try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response): |
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp)
def getName(subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0].upper()
fullname=fullname + i[1:]
subjInitial=subjInitial+i[0][0]
print subjInitial
return subjInitial.upper(),fullname,subjNum
def maxGroupNum(targetDir):
conpro=''
maxNumPattern=re.compile('\d+')
mx = 0
for string in maxNumPattern.findall(' '.join(os.listdir(targetDir))):
if int(string) > mx:
mx = int(string)
highest = mx +1
if highest<10:
highest ='0'+str(highest)
else:
highest = str(highest)
return conpro+highest
def executeBackUp(backupList,backUpFrom):
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(backupList)).start()
num=0
maxNum=len(backupList)
perOne=1/(maxNum*3)
for i in backupList:
shutil.copytree(i[0],i[1])
pbar.update(num+perOne)
log(i[0],i[1],i[2],i[3],i[4],i[5],i[7])
post_check(backUpFrom)
pbar.update(num+perOne)
time.sleep(0.01)
pbar.update(num+perOne)
os.system('rm {0}/.tmp*'.format(i[6]))
pbar.finish()
def log(source,destination,fullname,subjNum,groupName,note,birthday):
try:
timeInfo = time.gmtime(os.stat(source).st_mtime)
prodT=str(timeInfo.tm_year)+'_'+str(timeInfo.tm_mon)+'_'+str(timeInfo.tm_mday)
prodH=str(timeInfo.tm_hour)+':'+str(timeInfo.tm_min)
user=getpass.getuser()
currentTime=time.ctime()
with open(os.path.join(destination,'log.txt'),'w') as f:
f.write('''Subject Full Name = {6}
Subject number = {7}
Group Name = {8},{9}
Source : {0}
Date of Birth : {11}
Destination : {1}
Data produced in : {2}\t{3}
Data copied at : {4}
Copied by : {5}
Note[sex/experimenter/etc]: {10}'''.format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
with open(os.path.join(backUpFrom,'log.txt'),'a') as f:
f.write('{6}\t{8}\t{9}\t{11}\t{2}\t{3}\t{0}\t{4}\t{5}\{10}'.
format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
except:
print 'log failed'
#Pickle dump the list of subjects backed up in this round
def post_check(backUpFrom):
with open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'w') as f:
currentTime=time.ctime()
pickle.dump(alreadyCopied+copiedAtThisRound,f)
if __name__=='__main__':
argparser=argparse.ArgumentParser(prog='copy_check.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
2012_12_12
Kevin Cho
from USB hard-drive
find all new subjects
copy those folders into appropriate folders
append the log file
save patient number and name information automatically
into a log.txt
''',epilog="Kevin Cho 2013_05_17")
argparser.add_argument("--copy","-c",help="copies the data",action="store_true")
argparser.add_argument("--log","-l",help="makes the log of the copied the data",action="store_true")
args=argparser.parse_args()
if args.copy:
main()
else:
main()
#main() | alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue | random_line_split |
copy_check.py | #!/opt/bin/python2.7
'''
'''
import re
import time
import sys
import os
import tree
import shutil
import getpass
import pickle
from progressbar import ProgressBar,Percentage,Bar
import glob
import argparse
backupList=[] #Append list to execute backup
copiedAtThisRound=[] #Pickle dump list for the round of back up
initialList=[]
backUpTo=os.path.abspath('/volume1/CCNC_MRI') #Back up to
backUpFrom = os.path.abspath('/volumeUSB2/usbshare') #Find subj to back up from
if os.path.isfile(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt")):
f = open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'r')
alreadyCopied=pickle.load(f)
f.close()
print 'loaded log successfully'
print alreadyCopied
else:
alreadyCopied=[]
def main():
'''
test whether the source directory is a folder
'''
#directory = raw_input('\nwhere is the files ?:') ---> for different sources
if os.path.exists(backUpTo) & os.path.exists(backUpFrom) == True:
os.system('clear')
backUpConfirm(backUpFrom)
if copiedAtThisRound!=[]:
executeBackUp(backupList,backUpFrom)
def backUpConfirm(backUpFrom):
'''
show the list of folders under the backUpFrom
if it is confirmed by the user
excute backup
'''
dirList = [o for o in os.listdir(backUpFrom) if os.path.isdir(os.path.join(backUpFrom,o))]
#removing subjects already copied according to the log file
for copied in alreadyCopied:
try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response):
alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
|
def getName(subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0].upper()
fullname=fullname + i[1:]
subjInitial=subjInitial+i[0][0]
print subjInitial
return subjInitial.upper(),fullname,subjNum
def maxGroupNum(targetDir):
conpro=''
maxNumPattern=re.compile('\d+')
mx = 0
for string in maxNumPattern.findall(' '.join(os.listdir(targetDir))):
if int(string) > mx:
mx = int(string)
highest = mx +1
if highest<10:
highest ='0'+str(highest)
else:
highest = str(highest)
return conpro+highest
def executeBackUp(backupList,backUpFrom):
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(backupList)).start()
num=0
maxNum=len(backupList)
perOne=1/(maxNum*3)
for i in backupList:
shutil.copytree(i[0],i[1])
pbar.update(num+perOne)
log(i[0],i[1],i[2],i[3],i[4],i[5],i[7])
post_check(backUpFrom)
pbar.update(num+perOne)
time.sleep(0.01)
pbar.update(num+perOne)
os.system('rm {0}/.tmp*'.format(i[6]))
pbar.finish()
def log(source,destination,fullname,subjNum,groupName,note,birthday):
try:
timeInfo = time.gmtime(os.stat(source).st_mtime)
prodT=str(timeInfo.tm_year)+'_'+str(timeInfo.tm_mon)+'_'+str(timeInfo.tm_mday)
prodH=str(timeInfo.tm_hour)+':'+str(timeInfo.tm_min)
user=getpass.getuser()
currentTime=time.ctime()
with open(os.path.join(destination,'log.txt'),'w') as f:
f.write('''Subject Full Name = {6}
Subject number = {7}
Group Name = {8},{9}
Source : {0}
Date of Birth : {11}
Destination : {1}
Data produced in : {2}\t{3}
Data copied at : {4}
Copied by : {5}
Note[sex/experimenter/etc]: {10}'''.format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
with open(os.path.join(backUpFrom,'log.txt'),'a') as f:
f.write('{6}\t{8}\t{9}\t{11}\t{2}\t{3}\t{0}\t{4}\t{5}\{10}'.
format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
except:
print 'log failed'
#Pickle dump the list of subjects backed up in this round
def post_check(backUpFrom):
with open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'w') as f:
currentTime=time.ctime()
pickle.dump(alreadyCopied+copiedAtThisRound,f)
if __name__=='__main__':
argparser=argparse.ArgumentParser(prog='copy_check.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
2012_12_12
Kevin Cho
from USB hard-drive
find all new subjects
copy those folders into appropriate folders
append the log file
save patient number and name information automatically
into a log.txt
''',epilog="Kevin Cho 2013_05_17")
argparser.add_argument("--copy","-c",help="copies the data",action="store_true")
argparser.add_argument("--log","-l",help="makes the log of the copied the data",action="store_true")
args=argparser.parse_args()
if args.copy:
main()
else:
main()
#main()
| groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp) | conditional_block |
client.js | jQuery.fn.serializeObject = function() {
var o = {};
var a = this.serializeArray();
$.each(a, function() {
if (o[this.name]) {
if (!o[this.name].push) {
o[this.name] = [o[this.name]];
}
o[this.name].push(this.value || '');
} else {
o[this.name] = this.value || '';
}
});
return o;
};
function getCookie(name) {
var nameEQ = name + "=";
var ca = document.cookie.split(';');
for(var i=0;i < ca.length;i++) {
var c = ca[i];
while (c.charAt(0)==' ') c = c.substring(1,c.length);
if (c.indexOf(nameEQ) == 0) return c.substring(nameEQ.length,c.length);
}
return null;
}
jQuery(function(){
if(jQuery("#farmers").length > 0){
var dialog, form,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 ); | }
function checkLength( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
}
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
attribution: 'Map data © <a href="http://openstreetmap.org">OpenStreetMap</a> contributors, <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="http://mapbox.com">Mapbox</a>',
maxZoom: 18,
id: 'mapbox.streets',
accessToken: 'pk.eyJ1IjoiamtuYXJlc2giLCJhIjoiY2o4YmFmaWlpMGMwcDJxcDYxaDgxaXIwciJ9.qBkVUuA-N3PkyMBgPddVUA'
}).addTo(mymap);
// add a marker in the given location
// L.marker(center).addTo(mymap);
// Initialise the FeatureGroup to store editable layers
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
var drawPluginOptions = {
position: 'topright',
draw: {
polygon: {
allowIntersection: false, // Restricts shapes to simple polygons
drawError: {
color: '#e1e100', // Color the shape will turn when intersects
message: '<strong>Oh snap!<strong> you can\'t draw that!' // Message that will show when intersect
},
shapeOptions: {
color: '#97009c'
}
},
// disable toolbar item by setting it to false
polyline: false,
circle: false, // Turns off this drawing tool
rectangle: false,
marker: false,
},
edit: {
featureGroup: editableLayers, //REQUIRED!!
remove: false
}
};
// Initialise the draw control and pass it the FeatureGroup of editable layers
var drawControl = new L.Control.Draw(drawPluginOptions);
mymap.addControl(drawControl);
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
mymap.on('draw:created', function(e) {
var type = e.layerType,
layer = e.layer;
if (type === 'marker') {
layer.bindPopup('A popup!');
}
editableLayers.addLayer(layer);
console.log(layer);
gj=layer.toGeoJSON();
gj = gj.geometry.coordinates[0];
geo_arr = []
jQuery.each(gj,function(i, itm){
geo_arr.push(itm.join(","));
});
geo_json = geo_arr.join(":");
console.log(geo_json);
jQuery("#id_land_coordinates").val(geo_json);
});
}
});
}); | random_line_split | |
client.js | jQuery.fn.serializeObject = function() {
var o = {};
var a = this.serializeArray();
$.each(a, function() {
if (o[this.name]) {
if (!o[this.name].push) {
o[this.name] = [o[this.name]];
}
o[this.name].push(this.value || '');
} else {
o[this.name] = this.value || '';
}
});
return o;
};
function getCookie(name) {
var nameEQ = name + "=";
var ca = document.cookie.split(';');
for(var i=0;i < ca.length;i++) {
var c = ca[i];
while (c.charAt(0)==' ') c = c.substring(1,c.length);
if (c.indexOf(nameEQ) == 0) return c.substring(nameEQ.length,c.length);
}
return null;
}
jQuery(function(){
if(jQuery("#farmers").length > 0){
var dialog, form,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 );
}
function | ( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
}
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
attribution: 'Map data © <a href="http://openstreetmap.org">OpenStreetMap</a> contributors, <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="http://mapbox.com">Mapbox</a>',
maxZoom: 18,
id: 'mapbox.streets',
accessToken: 'pk.eyJ1IjoiamtuYXJlc2giLCJhIjoiY2o4YmFmaWlpMGMwcDJxcDYxaDgxaXIwciJ9.qBkVUuA-N3PkyMBgPddVUA'
}).addTo(mymap);
// add a marker in the given location
// L.marker(center).addTo(mymap);
// Initialise the FeatureGroup to store editable layers
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
var drawPluginOptions = {
position: 'topright',
draw: {
polygon: {
allowIntersection: false, // Restricts shapes to simple polygons
drawError: {
color: '#e1e100', // Color the shape will turn when intersects
message: '<strong>Oh snap!<strong> you can\'t draw that!' // Message that will show when intersect
},
shapeOptions: {
color: '#97009c'
}
},
// disable toolbar item by setting it to false
polyline: false,
circle: false, // Turns off this drawing tool
rectangle: false,
marker: false,
},
edit: {
featureGroup: editableLayers, //REQUIRED!!
remove: false
}
};
// Initialise the draw control and pass it the FeatureGroup of editable layers
var drawControl = new L.Control.Draw(drawPluginOptions);
mymap.addControl(drawControl);
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
mymap.on('draw:created', function(e) {
var type = e.layerType,
layer = e.layer;
if (type === 'marker') {
layer.bindPopup('A popup!');
}
editableLayers.addLayer(layer);
console.log(layer);
gj=layer.toGeoJSON();
gj = gj.geometry.coordinates[0];
geo_arr = []
jQuery.each(gj,function(i, itm){
geo_arr.push(itm.join(","));
});
geo_json = geo_arr.join(":");
console.log(geo_json);
jQuery("#id_land_coordinates").val(geo_json);
});
}
});
}); | checkLength | identifier_name |
client.js | jQuery.fn.serializeObject = function() {
var o = {};
var a = this.serializeArray();
$.each(a, function() {
if (o[this.name]) {
if (!o[this.name].push) {
o[this.name] = [o[this.name]];
}
o[this.name].push(this.value || '');
} else {
o[this.name] = this.value || '';
}
});
return o;
};
function getCookie(name) |
jQuery(function(){
if(jQuery("#farmers").length > 0){
var dialog, form,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 );
}
function checkLength( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
}
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
attribution: 'Map data © <a href="http://openstreetmap.org">OpenStreetMap</a> contributors, <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="http://mapbox.com">Mapbox</a>',
maxZoom: 18,
id: 'mapbox.streets',
accessToken: 'pk.eyJ1IjoiamtuYXJlc2giLCJhIjoiY2o4YmFmaWlpMGMwcDJxcDYxaDgxaXIwciJ9.qBkVUuA-N3PkyMBgPddVUA'
}).addTo(mymap);
// add a marker in the given location
// L.marker(center).addTo(mymap);
// Initialise the FeatureGroup to store editable layers
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
var drawPluginOptions = {
position: 'topright',
draw: {
polygon: {
allowIntersection: false, // Restricts shapes to simple polygons
drawError: {
color: '#e1e100', // Color the shape will turn when intersects
message: '<strong>Oh snap!<strong> you can\'t draw that!' // Message that will show when intersect
},
shapeOptions: {
color: '#97009c'
}
},
// disable toolbar item by setting it to false
polyline: false,
circle: false, // Turns off this drawing tool
rectangle: false,
marker: false,
},
edit: {
featureGroup: editableLayers, //REQUIRED!!
remove: false
}
};
// Initialise the draw control and pass it the FeatureGroup of editable layers
var drawControl = new L.Control.Draw(drawPluginOptions);
mymap.addControl(drawControl);
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
mymap.on('draw:created', function(e) {
var type = e.layerType,
layer = e.layer;
if (type === 'marker') {
layer.bindPopup('A popup!');
}
editableLayers.addLayer(layer);
console.log(layer);
gj=layer.toGeoJSON();
gj = gj.geometry.coordinates[0];
geo_arr = []
jQuery.each(gj,function(i, itm){
geo_arr.push(itm.join(","));
});
geo_json = geo_arr.join(":");
console.log(geo_json);
jQuery("#id_land_coordinates").val(geo_json);
});
}
});
}); | {
var nameEQ = name + "=";
var ca = document.cookie.split(';');
for(var i=0;i < ca.length;i++) {
var c = ca[i];
while (c.charAt(0)==' ') c = c.substring(1,c.length);
if (c.indexOf(nameEQ) == 0) return c.substring(nameEQ.length,c.length);
}
return null;
} | identifier_body |
client.js | jQuery.fn.serializeObject = function() {
var o = {};
var a = this.serializeArray();
$.each(a, function() {
if (o[this.name]) {
if (!o[this.name].push) {
o[this.name] = [o[this.name]];
}
o[this.name].push(this.value || '');
} else {
o[this.name] = this.value || '';
}
});
return o;
};
function getCookie(name) {
var nameEQ = name + "=";
var ca = document.cookie.split(';');
for(var i=0;i < ca.length;i++) {
var c = ca[i];
while (c.charAt(0)==' ') c = c.substring(1,c.length);
if (c.indexOf(nameEQ) == 0) return c.substring(nameEQ.length,c.length);
}
return null;
}
jQuery(function(){
if(jQuery("#farmers").length > 0){
var dialog, form,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 );
}
function checkLength( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) |
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
attribution: 'Map data © <a href="http://openstreetmap.org">OpenStreetMap</a> contributors, <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="http://mapbox.com">Mapbox</a>',
maxZoom: 18,
id: 'mapbox.streets',
accessToken: 'pk.eyJ1IjoiamtuYXJlc2giLCJhIjoiY2o4YmFmaWlpMGMwcDJxcDYxaDgxaXIwciJ9.qBkVUuA-N3PkyMBgPddVUA'
}).addTo(mymap);
// add a marker in the given location
// L.marker(center).addTo(mymap);
// Initialise the FeatureGroup to store editable layers
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
var drawPluginOptions = {
position: 'topright',
draw: {
polygon: {
allowIntersection: false, // Restricts shapes to simple polygons
drawError: {
color: '#e1e100', // Color the shape will turn when intersects
message: '<strong>Oh snap!<strong> you can\'t draw that!' // Message that will show when intersect
},
shapeOptions: {
color: '#97009c'
}
},
// disable toolbar item by setting it to false
polyline: false,
circle: false, // Turns off this drawing tool
rectangle: false,
marker: false,
},
edit: {
featureGroup: editableLayers, //REQUIRED!!
remove: false
}
};
// Initialise the draw control and pass it the FeatureGroup of editable layers
var drawControl = new L.Control.Draw(drawPluginOptions);
mymap.addControl(drawControl);
var editableLayers = new L.FeatureGroup();
mymap.addLayer(editableLayers);
mymap.on('draw:created', function(e) {
var type = e.layerType,
layer = e.layer;
if (type === 'marker') {
layer.bindPopup('A popup!');
}
editableLayers.addLayer(layer);
console.log(layer);
gj=layer.toGeoJSON();
gj = gj.geometry.coordinates[0];
geo_arr = []
jQuery.each(gj,function(i, itm){
geo_arr.push(itm.join(","));
});
geo_json = geo_arr.join(":");
console.log(geo_json);
jQuery("#id_land_coordinates").val(geo_json);
});
}
});
}); | {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
} | conditional_block |
vcf.rs | use std::{
fmt::{self, Write as fmtWrite},
io::{self, BufWriter, Write},
};
use log::Level::Trace;
use compress_io::{
compress::{CompressIo, Writer},
compress_type::CompressType,
};
use r_htslib::*;
use crate::{
model::{freq_mle, AlleleRes, N_QUAL, MAX_PHRED, ModelRes},
mann_whitney::mann_whitney,
cli::{Config, ThresholdType},
fisher::FisherTest,
reference::RefPos,
alleles::{AllDesc, LargeDeletion},
stat_funcs::pnormc,
};
const FLT_FS: u32 = 1;
const FLT_QUAL_BIAS: u32 = 2;
const FLT_BLACKLIST: u32 = 4;
const FLT_Q30: u32 = 8;
const FLT_LOW_FREQ: u32 = 16;
const FLT_HOMO_POLY: u32 = 32;
const FLT_STR: [(&str, &str); 6] = [
("strand_bias", "Alleles unevenely distributed across strands", ),
("qual_bias", "Minor allele has lower quality values"),
("blacklist", "Position on black list"),
("q30", "Quality < 30"),
("low_freq", "Low heteroplasmy frequency"),
("homopolymer", "Indel starting in homopolymer region"),
];
pub(crate) struct VcfCalc<'a, 'b, 'c> {
pub(crate) ftest: &'a FisherTest,
pub(crate) seq_len: usize,
pub(crate) ref_seq: &'b [RefPos],
pub(crate) homopolymer_limit: u8,
pub(crate) all_desc: Vec<AllDesc>, // AllDesc for 'standard' 5 alleles (A, C, G, T, Del)
pub(crate) cfg: &'c Config,
}
pub(crate) struct Allele {
pub(crate) res: AlleleRes,
pub(crate) ix: usize,
}
pub(crate) struct VcfRes {
pub(crate) alleles: Vec<Allele>,
pub(crate) adesc: Option<Vec<AllDesc>>,
pub(crate) x: usize,
pub(crate) phred: u8,
}
// Additional allele specific results
#[derive(Default, Copy, Clone)]
struct ExtraRes {
flt: u32,
avg_qual: f64,
fisher_strand: f64,
wilcox: f64,
}
impl<'a, 'b, 'c> VcfCalc<'a, 'b, 'c> {
pub fn get_allele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> VcfRes {
// Should only be used for single base variants where we expect 5 'alleles'
// for A, C, G, T and Del
assert_eq!(cts.len(), 5);
let indel_flags = [false, false, false, false, true];
let ref_ix = (self.ref_seq[x].base()) as usize;
self.est_allele_freqs(x, ref_ix, cts, qcts, &indel_flags)
}
pub fn get_mallele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
self.est_allele_freqs(x,0, cts, qcts, indel_flags)
}
pub fn est_allele_freqs(&self, x: usize, ref_ix: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
let n_alls = cts.len();
assert_eq!(n_alls, qcts.len());
let qual_model = self.cfg.qual_table();
// Fold across strands
let jcts: Vec<usize> = cts.iter().map(|x| x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap(); |
// Filter cutoffs
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0)
} else { 1.0 };
// Set allele freq. flags
let (lim, pos_adjust) = if desc[ar.ix].len() != desc[ref_ix].len() {
// This is an indel (size difference from reference)
let hp_size = (self.ref_seq[x].hpoly() & 0xf).max(self.ref_seq[x + 1].hpoly() & 0xf) + 1;
if hp_size >= self.homopolymer_limit {
flt |= FLT_HOMO_POLY
}
(indel_soft_lim, 0)
} else {
// In a complex variant, a SNV could start a few bases after the location of the variant
let x = if ar.ix == ref_ix { 0 } else {
// Find position of first base that differs between this allele and the reference
desc[ref_ix].iter().zip(desc[ar.ix].iter()).enumerate()
.find(|(_, (c1, c2))| *c1 != *c2).map(|(ix, _)| ix as u32).unwrap()
};
(snv_soft_lim, x)
};
if ar.res.freq < lim {
flt |= FLT_LOW_FREQ
}
// LR flags
if ar.res.lr_test < 30 {
flt |= FLT_Q30
}
// Blacklist
if self.cfg.blacklist(x + 1 + pos_adjust as usize) {
flt |= FLT_BLACKLIST
}
ExtraRes{ flt, avg_qual, fisher_strand, wilcox}
}).collect();
// Set wilcox allele flags
let mjr_qual = all_res[mj_idx].avg_qual;
for res in all_res.iter_mut() {
if res.wilcox <= thresh && mjr_qual - res.avg_qual > 2.0 {
res.flt |= FLT_QUAL_BIAS
}
}
// Genotype call
let f0 = vr.alleles[0].res.freq >= 1.0e-5;
let gt = match vr.alleles.len() {
1 => String::from("0"),
n => if f0 {
let mut s = String::from("0");
for i in 1..n {
s = format!("{}/{}", s, i);
}
s
} else {
let mut s = String::from("1");
for i in 2..n {
s = format!("{}/{}", s, i);
}
s
},
};
// Collect global flags
let mut flt = if vr.phred < 30 { FLT_Q30 } else { 0 };
// If no non-reference allele has no flags set, then set global flags
// to union of all allele flags
if all_res[1..].iter().all(|ar| ar.flt != 0) {
for ar in all_res[1..].iter() {
flt |= ar.flt
}
}
if vr.alleles.len() > 1 {
let mut f = String::new();
write!(f, "{}\t{}", desc[ref_ix], desc[vr.alleles[1].ix]).ok()?;
for s in vr.alleles[2..].iter().map(|a| &desc[a.ix]) {
write!(f, ",{}", s).ok()?;
}
write!(f, "\t{}\t{}", vr.phred, Filter(flt)).ok()?;
// INFO field
write!(f, "\tDP={}", raw_depth).ok()?;
for ar in vr.alleles[1..].iter() {
if desc[ar.ix].len() != desc[ref_ix].len() {
write!(f, ";INDEL").ok()?;
break
}
}
// FORMAT field
write!(f, "\tGT:ADF:ADR:HPL:FQSE:AQ:AFLT:QAVG:FSB:QBS").ok()?;
// GT field
write!(f, "\t{}:{}", gt, cts[vr.alleles[0].ix][0]).ok()?;
// ADF
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][0]).ok()?;
}
// ADR
write!(f, ":{}", cts[vr.alleles[0].ix][1]).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][1]).ok()?;
}
// HPL
write!(f, ":{:.5}", vr.alleles[1].res.freq).ok()?;
for all in vr.alleles[2..].iter() {
write!(f, ",{:.5}", all.res.freq).ok()?;
}
// FQSE
write!(f, ":{:.5}", vr.alleles[0].res.se).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{:.5}", all.res.se).ok()?;
}
// AQ
write!(f, ":{}", vr.alleles[0].res.lr_test).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", all.res.lr_test).ok()?;
}
// AFLT
write!(f, ":{}", Filter(all_res[0].flt)).ok()?;
for ar in &all_res[1..] {
write!(f, ",{}", Filter(ar.flt)).ok()?;
}
// QAVG
write!(f, ":{:.2}", all_res[0].avg_qual).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2}", ar.avg_qual).ok()?;
}
// FS
write!(f, ":{:.2e}", all_res[0].fisher_strand).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.fisher_strand).ok()?;
}
// QSB
write!(f, ":{:.2e}", all_res[0].wilcox).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.wilcox).ok()?;
}
Some(f)
} else {
None
}
}
}
#[derive(Default, Copy, Clone)]
struct Filter(u32);
impl fmt::Display for Filter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 == 0 {
write!(f, "PASS")
} else {
let mut first = true;
let mut x = self.0;
for (s, _) in FLT_STR.iter() {
if (x & 1) == 1 {
if !first {
write!(f, ";{}", s)?
} else {
first = false;
write!(f, "{}", s)?
}
}
x >>= 1;
if x == 0 {
break;
}
}
Ok(())
}
}
}
pub fn write_vcf_header(sam_hdr: &SamHeader, cfg: &Config) -> io::Result<BufWriter<Writer>> {
let vcf_output = format!("{}.vcf", cfg.output_prefix());
let reg = cfg.region();
let mut vcf_wrt = CompressIo::new()
.path(vcf_output)
.ctype(CompressType::Bgzip)
.bufwriter()?;
let sample = cfg.sample().unwrap_or("SAMPLE");
// Write VCF Headers
writeln!(vcf_wrt, "##fileformat=VCFv4.2")?;
writeln!(vcf_wrt, "##contig=<ID={},length={}>", sam_hdr.tid2name(reg.tid()), reg.ctg_size())?;
writeln!(vcf_wrt, "##FILTER=<ID=PASS,Description=\"Site contains at least one allele that passes filters\">")?;
for (s1, s2) in FLT_STR.iter() { writeln!(vcf_wrt, "##FILTER=<ID={},Description=\"{}\">", s1, s2)?; }
writeln!(vcf_wrt, "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADF,Number=R,Type=Integer,Description=\"Allelic depths on the forward strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADR,Number=R,Type=Integer,Description=\"Allelic depths on the reverse strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=HPL,Number=A,Type=Float,Description=\"Estimate of heteroplasmy frequency for alternate alleles\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FQSE,Number=R,Type=Float,Description=\"Standard errors of allele frequency estimates per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AQ,Number=R,Type=Float,Description=\"Phred scaled likelihood ratio for each allele (H0: allele freq is zero)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AFLT,Number=R,Type=String,Description=\"Filters per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FSB,Number=R,Type=Float,Description=\"Fisher test of allele strand bias per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QAVG,Number=R,Type=Float,Description=\"Average allele base quality scores\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QBS,Number=R,Type=Float,Description=\"Mann-Whitney-Wilcoxon test of minor allele base quality scores\">")?;
writeln!(vcf_wrt, "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw read depth\">")?;
writeln!(vcf_wrt, "##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant is an INDEL\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVTYPE,Number=1,Type=String,Description=\"Type of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=END,Number=1,Type=Integer,Description=\"End position of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVLEN,Number=1,Type=Integer,Description=\"Difference in length between REF and ALT alleles\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CIPOS,Number=2,Type=Integer,Description=\"95% confidence interval around POS for structural variants\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CILEN,Number=2,Type=Integer,Description=\"95% confidence interval around SVLEN for structural variants\">")?;
writeln!(vcf_wrt, "##ALT=<ID=DEL, Description=\"Deletion\">")?;
writeln!(vcf_wrt, "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{}", sample)?;
Ok(vcf_wrt)
} |
// Reference allele
let ref_ix = vr.alleles[0].ix; | random_line_split |
vcf.rs | use std::{
fmt::{self, Write as fmtWrite},
io::{self, BufWriter, Write},
};
use log::Level::Trace;
use compress_io::{
compress::{CompressIo, Writer},
compress_type::CompressType,
};
use r_htslib::*;
use crate::{
model::{freq_mle, AlleleRes, N_QUAL, MAX_PHRED, ModelRes},
mann_whitney::mann_whitney,
cli::{Config, ThresholdType},
fisher::FisherTest,
reference::RefPos,
alleles::{AllDesc, LargeDeletion},
stat_funcs::pnormc,
};
const FLT_FS: u32 = 1;
const FLT_QUAL_BIAS: u32 = 2;
const FLT_BLACKLIST: u32 = 4;
const FLT_Q30: u32 = 8;
const FLT_LOW_FREQ: u32 = 16;
const FLT_HOMO_POLY: u32 = 32;
const FLT_STR: [(&str, &str); 6] = [
("strand_bias", "Alleles unevenely distributed across strands", ),
("qual_bias", "Minor allele has lower quality values"),
("blacklist", "Position on black list"),
("q30", "Quality < 30"),
("low_freq", "Low heteroplasmy frequency"),
("homopolymer", "Indel starting in homopolymer region"),
];
pub(crate) struct VcfCalc<'a, 'b, 'c> {
pub(crate) ftest: &'a FisherTest,
pub(crate) seq_len: usize,
pub(crate) ref_seq: &'b [RefPos],
pub(crate) homopolymer_limit: u8,
pub(crate) all_desc: Vec<AllDesc>, // AllDesc for 'standard' 5 alleles (A, C, G, T, Del)
pub(crate) cfg: &'c Config,
}
pub(crate) struct Allele {
pub(crate) res: AlleleRes,
pub(crate) ix: usize,
}
pub(crate) struct VcfRes {
pub(crate) alleles: Vec<Allele>,
pub(crate) adesc: Option<Vec<AllDesc>>,
pub(crate) x: usize,
pub(crate) phred: u8,
}
// Additional allele specific results
#[derive(Default, Copy, Clone)]
struct ExtraRes {
flt: u32,
avg_qual: f64,
fisher_strand: f64,
wilcox: f64,
}
impl<'a, 'b, 'c> VcfCalc<'a, 'b, 'c> {
pub fn get_allele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> VcfRes {
// Should only be used for single base variants where we expect 5 'alleles'
// for A, C, G, T and Del
assert_eq!(cts.len(), 5);
let indel_flags = [false, false, false, false, true];
let ref_ix = (self.ref_seq[x].base()) as usize;
self.est_allele_freqs(x, ref_ix, cts, qcts, &indel_flags)
}
pub fn get_mallele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
self.est_allele_freqs(x,0, cts, qcts, indel_flags)
}
pub fn est_allele_freqs(&self, x: usize, ref_ix: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
let n_alls = cts.len();
assert_eq!(n_alls, qcts.len());
let qual_model = self.cfg.qual_table();
// Fold across strands
let jcts: Vec<usize> = cts.iter().map(|x| x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> |
}
#[derive(Default, Copy, Clone)]
struct Filter(u32);
impl fmt::Display for Filter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 == 0 {
write!(f, "PASS")
} else {
let mut first = true;
let mut x = self.0;
for (s, _) in FLT_STR.iter() {
if (x & 1) == 1 {
if !first {
write!(f, ";{}", s)?
} else {
first = false;
write!(f, "{}", s)?
}
}
x >>= 1;
if x == 0 {
break;
}
}
Ok(())
}
}
}
pub fn write_vcf_header(sam_hdr: &SamHeader, cfg: &Config) -> io::Result<BufWriter<Writer>> {
let vcf_output = format!("{}.vcf", cfg.output_prefix());
let reg = cfg.region();
let mut vcf_wrt = CompressIo::new()
.path(vcf_output)
.ctype(CompressType::Bgzip)
.bufwriter()?;
let sample = cfg.sample().unwrap_or("SAMPLE");
// Write VCF Headers
writeln!(vcf_wrt, "##fileformat=VCFv4.2")?;
writeln!(vcf_wrt, "##contig=<ID={},length={}>", sam_hdr.tid2name(reg.tid()), reg.ctg_size())?;
writeln!(vcf_wrt, "##FILTER=<ID=PASS,Description=\"Site contains at least one allele that passes filters\">")?;
for (s1, s2) in FLT_STR.iter() { writeln!(vcf_wrt, "##FILTER=<ID={},Description=\"{}\">", s1, s2)?; }
writeln!(vcf_wrt, "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADF,Number=R,Type=Integer,Description=\"Allelic depths on the forward strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADR,Number=R,Type=Integer,Description=\"Allelic depths on the reverse strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=HPL,Number=A,Type=Float,Description=\"Estimate of heteroplasmy frequency for alternate alleles\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FQSE,Number=R,Type=Float,Description=\"Standard errors of allele frequency estimates per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AQ,Number=R,Type=Float,Description=\"Phred scaled likelihood ratio for each allele (H0: allele freq is zero)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AFLT,Number=R,Type=String,Description=\"Filters per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FSB,Number=R,Type=Float,Description=\"Fisher test of allele strand bias per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QAVG,Number=R,Type=Float,Description=\"Average allele base quality scores\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QBS,Number=R,Type=Float,Description=\"Mann-Whitney-Wilcoxon test of minor allele base quality scores\">")?;
writeln!(vcf_wrt, "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw read depth\">")?;
writeln!(vcf_wrt, "##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant is an INDEL\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVTYPE,Number=1,Type=String,Description=\"Type of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=END,Number=1,Type=Integer,Description=\"End position of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVLEN,Number=1,Type=Integer,Description=\"Difference in length between REF and ALT alleles\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CIPOS,Number=2,Type=Integer,Description=\"95% confidence interval around POS for structural variants\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CILEN,Number=2,Type=Integer,Description=\"95% confidence interval around SVLEN for structural variants\">")?;
writeln!(vcf_wrt, "##ALT=<ID=DEL, Description=\"Deletion\">")?;
writeln!(vcf_wrt, "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{}", sample)?;
Ok(vcf_wrt)
}
| {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap();
// Reference allele
let ref_ix = vr.alleles[0].ix;
// Filter cutoffs
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0)
} else { 1.0 };
// Set allele freq. flags
let (lim, pos_adjust) = if desc[ar.ix].len() != desc[ref_ix].len() {
// This is an indel (size difference from reference)
let hp_size = (self.ref_seq[x].hpoly() & 0xf).max(self.ref_seq[x + 1].hpoly() & 0xf) + 1;
if hp_size >= self.homopolymer_limit {
flt |= FLT_HOMO_POLY
}
(indel_soft_lim, 0)
} else {
// In a complex variant, a SNV could start a few bases after the location of the variant
let x = if ar.ix == ref_ix { 0 } else {
// Find position of first base that differs between this allele and the reference
desc[ref_ix].iter().zip(desc[ar.ix].iter()).enumerate()
.find(|(_, (c1, c2))| *c1 != *c2).map(|(ix, _)| ix as u32).unwrap()
};
(snv_soft_lim, x)
};
if ar.res.freq < lim {
flt |= FLT_LOW_FREQ
}
// LR flags
if ar.res.lr_test < 30 {
flt |= FLT_Q30
}
// Blacklist
if self.cfg.blacklist(x + 1 + pos_adjust as usize) {
flt |= FLT_BLACKLIST
}
ExtraRes{ flt, avg_qual, fisher_strand, wilcox}
}).collect();
// Set wilcox allele flags
let mjr_qual = all_res[mj_idx].avg_qual;
for res in all_res.iter_mut() {
if res.wilcox <= thresh && mjr_qual - res.avg_qual > 2.0 {
res.flt |= FLT_QUAL_BIAS
}
}
// Genotype call
let f0 = vr.alleles[0].res.freq >= 1.0e-5;
let gt = match vr.alleles.len() {
1 => String::from("0"),
n => if f0 {
let mut s = String::from("0");
for i in 1..n {
s = format!("{}/{}", s, i);
}
s
} else {
let mut s = String::from("1");
for i in 2..n {
s = format!("{}/{}", s, i);
}
s
},
};
// Collect global flags
let mut flt = if vr.phred < 30 { FLT_Q30 } else { 0 };
// If no non-reference allele has no flags set, then set global flags
// to union of all allele flags
if all_res[1..].iter().all(|ar| ar.flt != 0) {
for ar in all_res[1..].iter() {
flt |= ar.flt
}
}
if vr.alleles.len() > 1 {
let mut f = String::new();
write!(f, "{}\t{}", desc[ref_ix], desc[vr.alleles[1].ix]).ok()?;
for s in vr.alleles[2..].iter().map(|a| &desc[a.ix]) {
write!(f, ",{}", s).ok()?;
}
write!(f, "\t{}\t{}", vr.phred, Filter(flt)).ok()?;
// INFO field
write!(f, "\tDP={}", raw_depth).ok()?;
for ar in vr.alleles[1..].iter() {
if desc[ar.ix].len() != desc[ref_ix].len() {
write!(f, ";INDEL").ok()?;
break
}
}
// FORMAT field
write!(f, "\tGT:ADF:ADR:HPL:FQSE:AQ:AFLT:QAVG:FSB:QBS").ok()?;
// GT field
write!(f, "\t{}:{}", gt, cts[vr.alleles[0].ix][0]).ok()?;
// ADF
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][0]).ok()?;
}
// ADR
write!(f, ":{}", cts[vr.alleles[0].ix][1]).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][1]).ok()?;
}
// HPL
write!(f, ":{:.5}", vr.alleles[1].res.freq).ok()?;
for all in vr.alleles[2..].iter() {
write!(f, ",{:.5}", all.res.freq).ok()?;
}
// FQSE
write!(f, ":{:.5}", vr.alleles[0].res.se).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{:.5}", all.res.se).ok()?;
}
// AQ
write!(f, ":{}", vr.alleles[0].res.lr_test).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", all.res.lr_test).ok()?;
}
// AFLT
write!(f, ":{}", Filter(all_res[0].flt)).ok()?;
for ar in &all_res[1..] {
write!(f, ",{}", Filter(ar.flt)).ok()?;
}
// QAVG
write!(f, ":{:.2}", all_res[0].avg_qual).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2}", ar.avg_qual).ok()?;
}
// FS
write!(f, ":{:.2e}", all_res[0].fisher_strand).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.fisher_strand).ok()?;
}
// QSB
write!(f, ":{:.2e}", all_res[0].wilcox).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.wilcox).ok()?;
}
Some(f)
} else {
None
}
} | identifier_body |
vcf.rs | use std::{
fmt::{self, Write as fmtWrite},
io::{self, BufWriter, Write},
};
use log::Level::Trace;
use compress_io::{
compress::{CompressIo, Writer},
compress_type::CompressType,
};
use r_htslib::*;
use crate::{
model::{freq_mle, AlleleRes, N_QUAL, MAX_PHRED, ModelRes},
mann_whitney::mann_whitney,
cli::{Config, ThresholdType},
fisher::FisherTest,
reference::RefPos,
alleles::{AllDesc, LargeDeletion},
stat_funcs::pnormc,
};
const FLT_FS: u32 = 1;
const FLT_QUAL_BIAS: u32 = 2;
const FLT_BLACKLIST: u32 = 4;
const FLT_Q30: u32 = 8;
const FLT_LOW_FREQ: u32 = 16;
const FLT_HOMO_POLY: u32 = 32;
const FLT_STR: [(&str, &str); 6] = [
("strand_bias", "Alleles unevenely distributed across strands", ),
("qual_bias", "Minor allele has lower quality values"),
("blacklist", "Position on black list"),
("q30", "Quality < 30"),
("low_freq", "Low heteroplasmy frequency"),
("homopolymer", "Indel starting in homopolymer region"),
];
pub(crate) struct VcfCalc<'a, 'b, 'c> {
pub(crate) ftest: &'a FisherTest,
pub(crate) seq_len: usize,
pub(crate) ref_seq: &'b [RefPos],
pub(crate) homopolymer_limit: u8,
pub(crate) all_desc: Vec<AllDesc>, // AllDesc for 'standard' 5 alleles (A, C, G, T, Del)
pub(crate) cfg: &'c Config,
}
pub(crate) struct | {
pub(crate) res: AlleleRes,
pub(crate) ix: usize,
}
pub(crate) struct VcfRes {
pub(crate) alleles: Vec<Allele>,
pub(crate) adesc: Option<Vec<AllDesc>>,
pub(crate) x: usize,
pub(crate) phred: u8,
}
// Additional allele specific results
#[derive(Default, Copy, Clone)]
struct ExtraRes {
flt: u32,
avg_qual: f64,
fisher_strand: f64,
wilcox: f64,
}
impl<'a, 'b, 'c> VcfCalc<'a, 'b, 'c> {
pub fn get_allele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> VcfRes {
// Should only be used for single base variants where we expect 5 'alleles'
// for A, C, G, T and Del
assert_eq!(cts.len(), 5);
let indel_flags = [false, false, false, false, true];
let ref_ix = (self.ref_seq[x].base()) as usize;
self.est_allele_freqs(x, ref_ix, cts, qcts, &indel_flags)
}
pub fn get_mallele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
self.est_allele_freqs(x,0, cts, qcts, indel_flags)
}
pub fn est_allele_freqs(&self, x: usize, ref_ix: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
let n_alls = cts.len();
assert_eq!(n_alls, qcts.len());
let qual_model = self.cfg.qual_table();
// Fold across strands
let jcts: Vec<usize> = cts.iter().map(|x| x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap();
// Reference allele
let ref_ix = vr.alleles[0].ix;
// Filter cutoffs
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0)
} else { 1.0 };
// Set allele freq. flags
let (lim, pos_adjust) = if desc[ar.ix].len() != desc[ref_ix].len() {
// This is an indel (size difference from reference)
let hp_size = (self.ref_seq[x].hpoly() & 0xf).max(self.ref_seq[x + 1].hpoly() & 0xf) + 1;
if hp_size >= self.homopolymer_limit {
flt |= FLT_HOMO_POLY
}
(indel_soft_lim, 0)
} else {
// In a complex variant, a SNV could start a few bases after the location of the variant
let x = if ar.ix == ref_ix { 0 } else {
// Find position of first base that differs between this allele and the reference
desc[ref_ix].iter().zip(desc[ar.ix].iter()).enumerate()
.find(|(_, (c1, c2))| *c1 != *c2).map(|(ix, _)| ix as u32).unwrap()
};
(snv_soft_lim, x)
};
if ar.res.freq < lim {
flt |= FLT_LOW_FREQ
}
// LR flags
if ar.res.lr_test < 30 {
flt |= FLT_Q30
}
// Blacklist
if self.cfg.blacklist(x + 1 + pos_adjust as usize) {
flt |= FLT_BLACKLIST
}
ExtraRes{ flt, avg_qual, fisher_strand, wilcox}
}).collect();
// Set wilcox allele flags
let mjr_qual = all_res[mj_idx].avg_qual;
for res in all_res.iter_mut() {
if res.wilcox <= thresh && mjr_qual - res.avg_qual > 2.0 {
res.flt |= FLT_QUAL_BIAS
}
}
// Genotype call
let f0 = vr.alleles[0].res.freq >= 1.0e-5;
let gt = match vr.alleles.len() {
1 => String::from("0"),
n => if f0 {
let mut s = String::from("0");
for i in 1..n {
s = format!("{}/{}", s, i);
}
s
} else {
let mut s = String::from("1");
for i in 2..n {
s = format!("{}/{}", s, i);
}
s
},
};
// Collect global flags
let mut flt = if vr.phred < 30 { FLT_Q30 } else { 0 };
// If no non-reference allele has no flags set, then set global flags
// to union of all allele flags
if all_res[1..].iter().all(|ar| ar.flt != 0) {
for ar in all_res[1..].iter() {
flt |= ar.flt
}
}
if vr.alleles.len() > 1 {
let mut f = String::new();
write!(f, "{}\t{}", desc[ref_ix], desc[vr.alleles[1].ix]).ok()?;
for s in vr.alleles[2..].iter().map(|a| &desc[a.ix]) {
write!(f, ",{}", s).ok()?;
}
write!(f, "\t{}\t{}", vr.phred, Filter(flt)).ok()?;
// INFO field
write!(f, "\tDP={}", raw_depth).ok()?;
for ar in vr.alleles[1..].iter() {
if desc[ar.ix].len() != desc[ref_ix].len() {
write!(f, ";INDEL").ok()?;
break
}
}
// FORMAT field
write!(f, "\tGT:ADF:ADR:HPL:FQSE:AQ:AFLT:QAVG:FSB:QBS").ok()?;
// GT field
write!(f, "\t{}:{}", gt, cts[vr.alleles[0].ix][0]).ok()?;
// ADF
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][0]).ok()?;
}
// ADR
write!(f, ":{}", cts[vr.alleles[0].ix][1]).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][1]).ok()?;
}
// HPL
write!(f, ":{:.5}", vr.alleles[1].res.freq).ok()?;
for all in vr.alleles[2..].iter() {
write!(f, ",{:.5}", all.res.freq).ok()?;
}
// FQSE
write!(f, ":{:.5}", vr.alleles[0].res.se).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{:.5}", all.res.se).ok()?;
}
// AQ
write!(f, ":{}", vr.alleles[0].res.lr_test).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", all.res.lr_test).ok()?;
}
// AFLT
write!(f, ":{}", Filter(all_res[0].flt)).ok()?;
for ar in &all_res[1..] {
write!(f, ",{}", Filter(ar.flt)).ok()?;
}
// QAVG
write!(f, ":{:.2}", all_res[0].avg_qual).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2}", ar.avg_qual).ok()?;
}
// FS
write!(f, ":{:.2e}", all_res[0].fisher_strand).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.fisher_strand).ok()?;
}
// QSB
write!(f, ":{:.2e}", all_res[0].wilcox).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.wilcox).ok()?;
}
Some(f)
} else {
None
}
}
}
#[derive(Default, Copy, Clone)]
struct Filter(u32);
impl fmt::Display for Filter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 == 0 {
write!(f, "PASS")
} else {
let mut first = true;
let mut x = self.0;
for (s, _) in FLT_STR.iter() {
if (x & 1) == 1 {
if !first {
write!(f, ";{}", s)?
} else {
first = false;
write!(f, "{}", s)?
}
}
x >>= 1;
if x == 0 {
break;
}
}
Ok(())
}
}
}
pub fn write_vcf_header(sam_hdr: &SamHeader, cfg: &Config) -> io::Result<BufWriter<Writer>> {
let vcf_output = format!("{}.vcf", cfg.output_prefix());
let reg = cfg.region();
let mut vcf_wrt = CompressIo::new()
.path(vcf_output)
.ctype(CompressType::Bgzip)
.bufwriter()?;
let sample = cfg.sample().unwrap_or("SAMPLE");
// Write VCF Headers
writeln!(vcf_wrt, "##fileformat=VCFv4.2")?;
writeln!(vcf_wrt, "##contig=<ID={},length={}>", sam_hdr.tid2name(reg.tid()), reg.ctg_size())?;
writeln!(vcf_wrt, "##FILTER=<ID=PASS,Description=\"Site contains at least one allele that passes filters\">")?;
for (s1, s2) in FLT_STR.iter() { writeln!(vcf_wrt, "##FILTER=<ID={},Description=\"{}\">", s1, s2)?; }
writeln!(vcf_wrt, "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADF,Number=R,Type=Integer,Description=\"Allelic depths on the forward strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADR,Number=R,Type=Integer,Description=\"Allelic depths on the reverse strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=HPL,Number=A,Type=Float,Description=\"Estimate of heteroplasmy frequency for alternate alleles\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FQSE,Number=R,Type=Float,Description=\"Standard errors of allele frequency estimates per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AQ,Number=R,Type=Float,Description=\"Phred scaled likelihood ratio for each allele (H0: allele freq is zero)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AFLT,Number=R,Type=String,Description=\"Filters per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FSB,Number=R,Type=Float,Description=\"Fisher test of allele strand bias per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QAVG,Number=R,Type=Float,Description=\"Average allele base quality scores\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QBS,Number=R,Type=Float,Description=\"Mann-Whitney-Wilcoxon test of minor allele base quality scores\">")?;
writeln!(vcf_wrt, "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw read depth\">")?;
writeln!(vcf_wrt, "##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant is an INDEL\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVTYPE,Number=1,Type=String,Description=\"Type of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=END,Number=1,Type=Integer,Description=\"End position of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVLEN,Number=1,Type=Integer,Description=\"Difference in length between REF and ALT alleles\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CIPOS,Number=2,Type=Integer,Description=\"95% confidence interval around POS for structural variants\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CILEN,Number=2,Type=Integer,Description=\"95% confidence interval around SVLEN for structural variants\">")?;
writeln!(vcf_wrt, "##ALT=<ID=DEL, Description=\"Deletion\">")?;
writeln!(vcf_wrt, "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{}", sample)?;
Ok(vcf_wrt)
}
| Allele | identifier_name |
vcf.rs | use std::{
fmt::{self, Write as fmtWrite},
io::{self, BufWriter, Write},
};
use log::Level::Trace;
use compress_io::{
compress::{CompressIo, Writer},
compress_type::CompressType,
};
use r_htslib::*;
use crate::{
model::{freq_mle, AlleleRes, N_QUAL, MAX_PHRED, ModelRes},
mann_whitney::mann_whitney,
cli::{Config, ThresholdType},
fisher::FisherTest,
reference::RefPos,
alleles::{AllDesc, LargeDeletion},
stat_funcs::pnormc,
};
const FLT_FS: u32 = 1;
const FLT_QUAL_BIAS: u32 = 2;
const FLT_BLACKLIST: u32 = 4;
const FLT_Q30: u32 = 8;
const FLT_LOW_FREQ: u32 = 16;
const FLT_HOMO_POLY: u32 = 32;
const FLT_STR: [(&str, &str); 6] = [
("strand_bias", "Alleles unevenely distributed across strands", ),
("qual_bias", "Minor allele has lower quality values"),
("blacklist", "Position on black list"),
("q30", "Quality < 30"),
("low_freq", "Low heteroplasmy frequency"),
("homopolymer", "Indel starting in homopolymer region"),
];
pub(crate) struct VcfCalc<'a, 'b, 'c> {
pub(crate) ftest: &'a FisherTest,
pub(crate) seq_len: usize,
pub(crate) ref_seq: &'b [RefPos],
pub(crate) homopolymer_limit: u8,
pub(crate) all_desc: Vec<AllDesc>, // AllDesc for 'standard' 5 alleles (A, C, G, T, Del)
pub(crate) cfg: &'c Config,
}
pub(crate) struct Allele {
pub(crate) res: AlleleRes,
pub(crate) ix: usize,
}
pub(crate) struct VcfRes {
pub(crate) alleles: Vec<Allele>,
pub(crate) adesc: Option<Vec<AllDesc>>,
pub(crate) x: usize,
pub(crate) phred: u8,
}
// Additional allele specific results
#[derive(Default, Copy, Clone)]
struct ExtraRes {
flt: u32,
avg_qual: f64,
fisher_strand: f64,
wilcox: f64,
}
impl<'a, 'b, 'c> VcfCalc<'a, 'b, 'c> {
pub fn get_allele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> VcfRes {
// Should only be used for single base variants where we expect 5 'alleles'
// for A, C, G, T and Del
assert_eq!(cts.len(), 5);
let indel_flags = [false, false, false, false, true];
let ref_ix = (self.ref_seq[x].base()) as usize;
self.est_allele_freqs(x, ref_ix, cts, qcts, &indel_flags)
}
pub fn get_mallele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
self.est_allele_freqs(x,0, cts, qcts, indel_flags)
}
pub fn est_allele_freqs(&self, x: usize, ref_ix: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
let n_alls = cts.len();
assert_eq!(n_alls, qcts.len());
let qual_model = self.cfg.qual_table();
// Fold across strands
let jcts: Vec<usize> = cts.iter().map(|x| x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap();
// Reference allele
let ref_ix = vr.alleles[0].ix;
// Filter cutoffs
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx | else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0)
} else { 1.0 };
// Set allele freq. flags
let (lim, pos_adjust) = if desc[ar.ix].len() != desc[ref_ix].len() {
// This is an indel (size difference from reference)
let hp_size = (self.ref_seq[x].hpoly() & 0xf).max(self.ref_seq[x + 1].hpoly() & 0xf) + 1;
if hp_size >= self.homopolymer_limit {
flt |= FLT_HOMO_POLY
}
(indel_soft_lim, 0)
} else {
// In a complex variant, a SNV could start a few bases after the location of the variant
let x = if ar.ix == ref_ix { 0 } else {
// Find position of first base that differs between this allele and the reference
desc[ref_ix].iter().zip(desc[ar.ix].iter()).enumerate()
.find(|(_, (c1, c2))| *c1 != *c2).map(|(ix, _)| ix as u32).unwrap()
};
(snv_soft_lim, x)
};
if ar.res.freq < lim {
flt |= FLT_LOW_FREQ
}
// LR flags
if ar.res.lr_test < 30 {
flt |= FLT_Q30
}
// Blacklist
if self.cfg.blacklist(x + 1 + pos_adjust as usize) {
flt |= FLT_BLACKLIST
}
ExtraRes{ flt, avg_qual, fisher_strand, wilcox}
}).collect();
// Set wilcox allele flags
let mjr_qual = all_res[mj_idx].avg_qual;
for res in all_res.iter_mut() {
if res.wilcox <= thresh && mjr_qual - res.avg_qual > 2.0 {
res.flt |= FLT_QUAL_BIAS
}
}
// Genotype call
let f0 = vr.alleles[0].res.freq >= 1.0e-5;
let gt = match vr.alleles.len() {
1 => String::from("0"),
n => if f0 {
let mut s = String::from("0");
for i in 1..n {
s = format!("{}/{}", s, i);
}
s
} else {
let mut s = String::from("1");
for i in 2..n {
s = format!("{}/{}", s, i);
}
s
},
};
// Collect global flags
let mut flt = if vr.phred < 30 { FLT_Q30 } else { 0 };
// If no non-reference allele has no flags set, then set global flags
// to union of all allele flags
if all_res[1..].iter().all(|ar| ar.flt != 0) {
for ar in all_res[1..].iter() {
flt |= ar.flt
}
}
if vr.alleles.len() > 1 {
let mut f = String::new();
write!(f, "{}\t{}", desc[ref_ix], desc[vr.alleles[1].ix]).ok()?;
for s in vr.alleles[2..].iter().map(|a| &desc[a.ix]) {
write!(f, ",{}", s).ok()?;
}
write!(f, "\t{}\t{}", vr.phred, Filter(flt)).ok()?;
// INFO field
write!(f, "\tDP={}", raw_depth).ok()?;
for ar in vr.alleles[1..].iter() {
if desc[ar.ix].len() != desc[ref_ix].len() {
write!(f, ";INDEL").ok()?;
break
}
}
// FORMAT field
write!(f, "\tGT:ADF:ADR:HPL:FQSE:AQ:AFLT:QAVG:FSB:QBS").ok()?;
// GT field
write!(f, "\t{}:{}", gt, cts[vr.alleles[0].ix][0]).ok()?;
// ADF
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][0]).ok()?;
}
// ADR
write!(f, ":{}", cts[vr.alleles[0].ix][1]).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", cts[all.ix][1]).ok()?;
}
// HPL
write!(f, ":{:.5}", vr.alleles[1].res.freq).ok()?;
for all in vr.alleles[2..].iter() {
write!(f, ",{:.5}", all.res.freq).ok()?;
}
// FQSE
write!(f, ":{:.5}", vr.alleles[0].res.se).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{:.5}", all.res.se).ok()?;
}
// AQ
write!(f, ":{}", vr.alleles[0].res.lr_test).ok()?;
for all in vr.alleles[1..].iter() {
write!(f, ",{}", all.res.lr_test).ok()?;
}
// AFLT
write!(f, ":{}", Filter(all_res[0].flt)).ok()?;
for ar in &all_res[1..] {
write!(f, ",{}", Filter(ar.flt)).ok()?;
}
// QAVG
write!(f, ":{:.2}", all_res[0].avg_qual).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2}", ar.avg_qual).ok()?;
}
// FS
write!(f, ":{:.2e}", all_res[0].fisher_strand).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.fisher_strand).ok()?;
}
// QSB
write!(f, ":{:.2e}", all_res[0].wilcox).ok()?;
for ar in &all_res[1..] {
write!(f, ",{:.2e}", ar.wilcox).ok()?;
}
Some(f)
} else {
None
}
}
}
#[derive(Default, Copy, Clone)]
struct Filter(u32);
impl fmt::Display for Filter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 == 0 {
write!(f, "PASS")
} else {
let mut first = true;
let mut x = self.0;
for (s, _) in FLT_STR.iter() {
if (x & 1) == 1 {
if !first {
write!(f, ";{}", s)?
} else {
first = false;
write!(f, "{}", s)?
}
}
x >>= 1;
if x == 0 {
break;
}
}
Ok(())
}
}
}
pub fn write_vcf_header(sam_hdr: &SamHeader, cfg: &Config) -> io::Result<BufWriter<Writer>> {
let vcf_output = format!("{}.vcf", cfg.output_prefix());
let reg = cfg.region();
let mut vcf_wrt = CompressIo::new()
.path(vcf_output)
.ctype(CompressType::Bgzip)
.bufwriter()?;
let sample = cfg.sample().unwrap_or("SAMPLE");
// Write VCF Headers
writeln!(vcf_wrt, "##fileformat=VCFv4.2")?;
writeln!(vcf_wrt, "##contig=<ID={},length={}>", sam_hdr.tid2name(reg.tid()), reg.ctg_size())?;
writeln!(vcf_wrt, "##FILTER=<ID=PASS,Description=\"Site contains at least one allele that passes filters\">")?;
for (s1, s2) in FLT_STR.iter() { writeln!(vcf_wrt, "##FILTER=<ID={},Description=\"{}\">", s1, s2)?; }
writeln!(vcf_wrt, "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADF,Number=R,Type=Integer,Description=\"Allelic depths on the forward strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=ADR,Number=R,Type=Integer,Description=\"Allelic depths on the reverse strand (high-quality bases)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=HPL,Number=A,Type=Float,Description=\"Estimate of heteroplasmy frequency for alternate alleles\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FQSE,Number=R,Type=Float,Description=\"Standard errors of allele frequency estimates per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AQ,Number=R,Type=Float,Description=\"Phred scaled likelihood ratio for each allele (H0: allele freq is zero)\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=AFLT,Number=R,Type=String,Description=\"Filters per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=FSB,Number=R,Type=Float,Description=\"Fisher test of allele strand bias per allele\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QAVG,Number=R,Type=Float,Description=\"Average allele base quality scores\">")?;
writeln!(vcf_wrt, "##FORMAT=<ID=QBS,Number=R,Type=Float,Description=\"Mann-Whitney-Wilcoxon test of minor allele base quality scores\">")?;
writeln!(vcf_wrt, "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw read depth\">")?;
writeln!(vcf_wrt, "##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant is an INDEL\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVTYPE,Number=1,Type=String,Description=\"Type of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=END,Number=1,Type=Integer,Description=\"End position of structural variant\">")?;
writeln!(vcf_wrt, "##INFO=<ID=SVLEN,Number=1,Type=Integer,Description=\"Difference in length between REF and ALT alleles\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CIPOS,Number=2,Type=Integer,Description=\"95% confidence interval around POS for structural variants\">")?;
writeln!(vcf_wrt, "##INFO=<ID=CILEN,Number=2,Type=Integer,Description=\"95% confidence interval around SVLEN for structural variants\">")?;
writeln!(vcf_wrt, "##ALT=<ID=DEL, Description=\"Deletion\">")?;
writeln!(vcf_wrt, "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{}", sample)?;
Ok(vcf_wrt)
}
| {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} | conditional_block |
Pak.py | #! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
from Urcheon import Action
from Urcheon import Default
from Urcheon import FileSystem
from Urcheon import Game
from Urcheon import MapCompiler
from Urcheon import Parallelism
from Urcheon import Repository
from Urcheon import Ui
import __main__ as m
import argparse
import logging
import os
import sys
import tempfile
import time
import zipfile
from collections import OrderedDict
from datetime import datetime
from operator import attrgetter
class MultiRunner():
def __init__(self, source_dir_list, args):
self.source_dir_list = source_dir_list
self.args = args
self.runner_dict = {
"prepare": Builder,
"build": Builder,
"package": Packager,
}
def run(self):
cpu_count = Parallelism.countCPU()
runner_thread_list = []
for source_dir in self.source_dir_list:
# FIXME: because of this code Urcheon must run within package set directory
Ui.notice(self.args.stage_name + " from: " + source_dir)
source_dir = os.path.realpath(source_dir)
source_tree = Repository.Tree(source_dir, game_name=self.args.game_name)
runner = self.runner_dict[self.args.stage_name](source_tree, self.args)
if self.args.no_parallel:
runner.run()
else:
runner_thread = Parallelism.Thread(target=runner.run)
runner_thread_list.append(runner_thread)
while len(runner_thread_list) > cpu_count:
# join dead thread early to raise thread exceptions early
# forget ended threads
runner_thread_list = Parallelism.joinDeadThreads(runner_thread_list)
runner_thread.start()
# wait for all remaining threads ending
Parallelism.joinThreads(runner_thread_list)
class Builder():
def __init__(self, source_tree, args, is_nested=False, disabled_action_list=[], file_list=[]):
self.source_tree = source_tree
self.source_dir = source_tree.dir
self.pak_name = source_tree.pak_name
self.pak_format = source_tree.pak_format
self.game_name = source_tree.game_name
self.is_nested = is_nested
self.stage_name = args.stage_name
if is_nested:
self.keep_dust = False
else:
self.keep_dust = args.keep_dust
action_list = Action.List(source_tree, self.stage_name, disabled_action_list=disabled_action_list)
if self.stage_name == "prepare":
self.test_dir = self.source_dir
self.since_reference = None
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
# FIXME: currently the prepare stage
# can't be parallel (for example SlothRun task
# needs all PrevRun tasks to be finished first)
# btw all packages can be prepared in parallel
self.is_parallel = False
else:
if is_nested:
self.test_dir = args.test_dir
else:
self.test_dir = source_tree.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
if is_nested:
self.since_reference = False
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
self.is_parallel = not args.no_parallel
else:
self.since_reference = args.since_reference
self.no_auto_actions = args.no_auto_actions
self.clean_map = args.clean_map
self.map_profile = args.map_profile
self.is_parallel = not args.no_parallel
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(self.source_tree, self.test_dir, self.stage_name)
self.deps = Repository.Deps(self.source_tree, self.test_dir)
if not is_nested:
if self.pak_format == "dpk":
deleted_action_list = self.deleted.getActions()
action_list.readActions(action_list=deleted_action_list)
action_list.readActions()
if not file_list:
# FIXME: only if one package?
# same reference for multiple packages
# makes sense when using tags
# NOTE: already prepared file can be seen as source again, but there may be no easy way to solve it
if self.since_reference:
file_repo = Repository.Git(self.source_dir, self.pak_format)
file_list = file_repo.listFilesSinceReference(self.since_reference)
# also look for untracked files
untracked_file_list = file_repo.listUntrackedFiles()
for file_name in untracked_file_list:
if file_name not in file_list:
logging.debug("found untracked file “" + file_name + "”")
# FIXME: next loop will look for prepared files for it, which makes no sense,
# is it harmful?
file_list.append(file_name)
# also look for files produced with “prepare” command
# from files modified since this reference
paktrace = Repository.Paktrace(source_tree, self.source_dir)
input_file_dict = paktrace.getFileDict()["input"]
for file_path in file_list:
logging.debug("looking for prepared files for “" + str(file_path) + "”")
logging.debug("looking for prepared files for “" + file_path + "”")
if file_path in input_file_dict.keys():
for input_file_path in input_file_dict[file_path]:
if not os.path.exists(os.path.join(self.source_dir, input_file_path)):
logging.debug("missing prepared files for “" + file_path + "”: " + input_file_path)
else:
logging.debug("found prepared files for “" + file_path + "”: " + input_file_path)
file_list.append(input_file_path)
else:
file_list = source_tree.listFiles()
if not self.no_auto_actions:
action_list.computeActions(file_list)
self.action_list = action_list
self.game_profile = Game.Game(source_tree)
if not self.map_profile:
map_config = MapCompiler.Config(source_tree)
self.map_profile = map_config.requireDefaultProfile()
def run(self):
if self.source_dir == self.test_dir:
Ui.print("Preparing: " + self.source_dir)
else:
Ui.print("Building “" + self.source_dir + "” as: " + self.test_dir)
# TODO: check if not a directory
if os.path.isdir(self.test_dir):
logging.debug("found build dir: " + self.test_dir)
else:
logging.debug("create build dir: " + self.test_dir)
os.makedirs(self.test_dir, exist_ok=True)
if not self.is_nested and not self.keep_dust:
clean_dust = True
else:
clean_dust = False
if clean_dust:
# do not read paktrace from temporary directories
# do not read paktrace if dust will be kept
paktrace = Repository.Paktrace(self.source_tree, self.test_dir)
previous_file_list = paktrace.listAll()
if self.clean_map or clean_dust:
cleaner = Cleaner(self.source_tree)
if self.clean_map:
cleaner.cleanMap(self.test_dir)
cpu_count = Parallelism.countCPU()
action_thread_list = []
produced_unit_list = []
main_process = Parallelism.getProcess()
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
# no need to use multiprocessing module to manage task contention, since each task will call its own process
# using threads on one core is faster, and it does not prevent tasks to be able to use other cores
# the is_nested argument is there to tell action to not do specific stuff because of recursion
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, map_profile=self.map_profile, is_nested=self.is_nested)
# check if task is already done (usually comparing timestamps the make way)
if action.isDone():
produced_unit_list.extend(action.getOldProducedUnitList())
continue
if not self.is_parallel or not action_type.is_parallel:
# tasks are run sequentially but they can
# use multiple threads themselves
thread_count = cpu_count
else:
# this compute is super slow because of process.children()
child_thread_count = Parallelism.countChildThread(main_process)
thread_count = max(1, cpu_count - child_thread_count)
action.thread_count = thread_count
if not self.is_parallel or not action_type.is_parallel:
# sequential build explicitely requested (like in recursion)
# or action that can't be run concurrently to others (like MergeBsp)
produced_unit_list.extend(action.run())
else:
# do not use >= in case of there is some extra thread we don't think about
# it's better to spawn an extra one than looping forever
while child_thread_count > cpu_count:
# no need to loop at full cpu speed
time.sleep(.05)
child_thread_count = Parallelism.countChildThread(main_process)
pass
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
action.thread_count = max(2, cpu_count - child_thread_count)
# wrapper does: produced_unit_list.extend(action.run())
action_thread = Parallelism.Thread(target=self.threadExtendRes, args=(action.run, (), produced_unit_list))
action_thread_list.append(action_thread)
action_thread.start()
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
# wait for all threads to end, otherwise it will start packaging next
# package while the building task for the current one is not ended
# and well, we now have to read that list to purge old files, so we
# must wait
Parallelism.joinThreads(action_thread_list)
# Handle symbolic links.
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, action_list=self.action_list, map_profile=self.map_profile, is_nested=self.is_nested)
# TODO: check for symbolic link to missing or deleted files.
produced_unit_list.extend(action.symlink())
# deduplication
unit_list = []
deleted_file_list = []
produced_file_list = []
for unit in produced_unit_list:
if unit == []:
continue
logging.debug("unit: " + str(unit))
head = unit["head"]
body = unit["body"]
action = unit["action"]
if action == "ignore":
continue
if action == "delete":
deleted_file_list.append( head )
if head not in produced_file_list:
produced_file_list.append(head)
for part in body:
if part not in produced_file_list:
# FIXME: only if action was not “ignore”
produced_file_list.append(part)
# if multiple calls produce the same files (like merge_bsp)
# FIXME: that can't work, this is probably a leftover
# or we may have to do “if head in body” instead.
# See https://github.com/DaemonEngine/Urcheon/issues/48
if head in unit:
continue
unit_list.append(unit)
produced_unit_list = unit_list
if self.stage_name == "build" and not self.is_nested:
if self.pak_format == "dpk":
is_deleted = False
if self.since_reference:
Ui.laconic("looking for deleted files")
# Unvanquished game did not support DELETED file until after 0.52.1.
workaround_no_delete = self.source_tree.game_name == "unvanquished" and self.since_reference in ["unvanquished/0.52.1", "v0.52.1"]
git_repo = Repository.Git(self.source_dir, "dpk", workaround_no_delete=workaround_no_delete)
previous_version = git_repo.computeVersion(self.since_reference, named_reference=True)
self.deps.set(self.pak_name, previous_version)
for deleted_file in git_repo.getDeletedFileList(self.since_reference):
if deleted_file not in deleted_file_list:
is_deleted = True
deleted_file_list.append(deleted_file)
if deleted_file_list:
is_deleted = True
for deleted_file in deleted_file_list:
self.deleted.set(self.pak_name, deleted_file)
if self.deleted.read():
is_deleted = True
if is_deleted:
deleted_part_list = self.deleted.translate()
# TODO: No need to mark as DELETED a file from the same
# package if it does not depend on itself.
# TODO: A way to not translate DELETED files may be needed
# in some cases.
# If flamer.jpg producing flamer.crn was replaced
# by flamer.png also producing flamer.crn, the
# flamer.crn file will be listed as deleted
# while it will be shipped, but built from another
# source file, so we must check deleted files
# aren't built in other way to avoid listing
# as deleted a file that is actually shipped.
for deleted_part_dict in deleted_part_list:
is_built = False
if deleted_part_dict["pak_name"] == self.pak_name:
deleted_part = deleted_part_dict["file_path"]
if deleted_part.startswith(Default.repository_config_dir + os.path.sep):
continue
if deleted_part.startswith(Default.legacy_pakinfo_dir + os.path.sep):
continue
if deleted_part in produced_file_list:
is_built = True
Ui.laconic(deleted_part + ": do nothing because it is produced by another source file.")
self.deleted.removePart(self.pak_name, deleted_part)
if not is_built:
Ui.laconic(deleted_part + ": will mark as deleted.")
# Writing DELETED file.
for deleted_part in deleted_part_list:
self.deleted.set(self.source_tree.pak_name, deleted_part)
is_deleted = self.deleted.write()
if is_deleted:
unit = {
"head": "DELETED",
"body": [ "DELETED" ],
}
produced_unit_list.append(unit)
else:
# Remove DELETED leftover from partial build.
self.deps.remove(self.test_dir)
is_deps = False
# add itself to DEPS if partial build,
# also look for deleted files
if self.since_reference:
is_deps = True
if self.deps.read():
is_deps = True
if is_deps:
# translating DEPS file
self.deps.translateTest()
self.deps.write()
unit = {
"head": "DEPS",
"body": [ "DEPS" ],
}
produced_unit_list.append(unit)
else:
# Remove DEPS leftover from partial build.
self.deps.remove(self.test_dir)
logging.debug("produced unit list:" + str(produced_unit_list))
# do not clean-up if building from temporary directories
# or if user asked to not clean-up
if clean_dust:
cleaner.cleanDust(self.test_dir, produced_unit_list, previous_file_list)
return produced_unit_list
def threadExtendRes(self, func, args, res):
# magic: only works if res is a mutable object (like a list)
res.extend(func(*args))
class Packager():
# TODO: reuse paktraces, do not walk for file,s
def __init__(self, source_tree, args):
self.source_dir = source_tree.dir
self.pak_vfs = source_tree.pak_vfs
self.pak_config = source_tree.pak_config
self.pak_format = source_tree.pak_format
self.allow_dirty = args.allow_dirty
self.no_compress = args.no_compress
self.test_dir = self.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
self.pak_file = self.pak_config.getPakFile(build_prefix=args.build_prefix, pak_prefix=args.pak_prefix, pak_file=args.pak_file, version_suffix=args.version_suffix)
self.game_profile = Game.Game(source_tree)
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(source_tree, self.test_dir, None)
self.deps = Repository.Deps(source_tree, self.test_dir)
def createSubdirs(self, pak_file):
pak_subdir = os.path.dirname(pak_file)
if pak_subdir == "":
pak_subdir = "."
if os.path.isdir(pak_subdir):
logging.debug("found pak subdir: " + pak_subdir)
else:
logging.debug("create pak subdir: " + pak_subdir)
os.makedirs(pak_subdir, exist_ok=True)
def run(self):
if not os.path.isdir(self.test_dir):
Ui.error("test pakdir not built: " + self.test_dir)
source_repository = Repository.Git(self.source_dir, self.pak_format)
if source_repository.isGit() and source_repository.isDirty():
if self.allow_dirty:
Ui.warning("Dirty repository: " + self.source_dir)
else:
Ui.error("Dirty repository isn't allowed to be packaged (use --allow-dirty to override): " + self.source_dir)
Ui.print("Packaging “" + self.test_dir + "” as: " + self.pak_file)
self.createSubdirs(self.pak_file)
logging.debug("opening: " + self.pak_file)
# remove existing file (do not write in place) to force the game engine to reread the file
if os.path.isfile(self.pak_file):
logging.debug("remove existing package: " + self.pak_file)
os.remove(self.pak_file)
if self.no_compress:
# why zlib.Z_NO_COMPRESSION not defined?
zipfile.zlib.Z_DEFAULT_COMPRESSION = 0
else:
# maximum compression
zipfile.zlib.Z_DEFAULT_COMPRESSION = zipfile.zlib.Z_BEST_COMPRESSION
found_file = False
paktrace_dir = Default.getPakTraceDir(self.test_dir)
relative_paktrace_dir = os.path.relpath(paktrace_dir, self.test_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
for file_name in file_name_list:
found_file = True
break
if found_file:
break
# FIXME: if only the DEPS file is modified, the package will
# not be created (it should be).
if not found_file:
Ui.print("Not writing empty package: " + self.pak_file)
return
pak = zipfile.ZipFile(self.pak_file, "w", zipfile.ZIP_DEFLATED)
for dir_name, subdir_name_list, file_name_list in os.walk(self.test_dir):
for file_name in file_name_list:
rel_dir_name = os.path.relpath(dir_name, self.test_dir)
full_path = os.path.join(dir_name, file_name)
file_path = os.path.relpath(full_path, self.test_dir)
# ignore paktrace files
if file_path.startswith(relative_paktrace_dir + os.path.sep):
continue
# ignore DELETED and DEPS file, will add it later
if self.pak_format == "dpk" and file_path in Repository.dpk_special_files:
continue
found_file = True
# TODO: add a mechanism to know if VFS supports
# symbolic links in packages or not.
# Dæmon's DPK VFS is supporting symbolic links.
# DarkPlaces' PK3 VFS is supporting symbolic links.
# Others may not.
is_symlink_supported = True
if is_symlink_supported and os.path.islink(full_path):
Ui.print("add symlink to package " + os.path.basename(self.pak_file) + ": " + file_path)
# TODO: Remove this test when Urcheon deletes extra
# files in build directory. Currently a deleted but not |
# See https://stackoverflow.com/a/61795576/9131399
attrs = ('year', 'month', 'day', 'hour', 'minute', 'second')
file_date_time_tuple = attrgetter(*attrs)(file_date_time)
# See https://stackoverflow.com/a/60691331/9131399
zip_info = zipfile.ZipInfo(file_path, date_time=file_date_time_tuple)
zip_info.create_system = 3
file_permissions = 0o777
file_permissions |= 0xA000
zip_info.external_attr = file_permissions << 16
target_path = os.readlink(full_path)
pak.writestr(zip_info, target_path)
else:
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": " + file_path)
pak.write(full_path, arcname=file_path)
if self.pak_format == "dpk":
# Writing DELETED file.
deleted_file_path = self.deleted.get_test_path()
if os.path.isfile(deleted_file_path):
pak.write(deleted_file_path, arcname="DELETED")
# Translating DEPS file.
if self.deps.read(deps_dir=self.test_dir):
self.deps.translateRelease(self.pak_vfs)
deps_temp_dir = tempfile.mkdtemp()
deps_temp_file = self.deps.write(deps_dir=deps_temp_dir)
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": DEPS")
pak.write(deps_temp_file, arcname="DEPS")
logging.debug("close: " + self.pak_file)
pak.close()
if source_repository.isGit():
repo_date = int(source_repository.getDate("HEAD"))
os.utime(self.pak_file, (repo_date, repo_date))
Ui.laconic("Package written: " + self.pak_file)
class Cleaner():
def __init__(self, source_tree):
self.pak_name = source_tree.pak_name
self.game_profile = Game.Game(source_tree)
def cleanTest(self, test_dir):
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
that_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + that_file)
os.remove(that_file)
FileSystem.removeEmptyDir(dir_name)
for dir_name in subdir_name_list:
that_dir = dir_name + os.path.sep + dir_name
FileSystem.removeEmptyDir(that_dir)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanPak(self, pak_prefix):
for dir_name, subdir_name_list, file_name_list in os.walk(pak_prefix):
for file_name in file_name_list:
if file_name.startswith(self.pak_name) and file_name.endswith(self.game_profile.pak_ext):
pak_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + pak_file)
os.remove(pak_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(pak_prefix)
def cleanMap(self, test_dir):
# TODO: use paktrace abilities?
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "bsp"):
bsp_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + bsp_file)
os.remove(bsp_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "map"):
map_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + map_file)
os.remove(map_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-2:-1] == ["maps"] and file_name.startswith("lm_"):
lightmap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + lightmap_file)
os.remove(lightmap_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "navMesh"):
navmesh_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + navmesh_file)
os.remove(navmesh_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["minimaps"]:
minimap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + minimap_file)
os.remove(minimap_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanDust(self, test_dir, produced_unit_list, previous_file_list):
# TODO: remove extra files that are not tracked in paktraces?
# FIXME: reuse produced_file_list from build()
produced_file_list = []
head_list = []
for unit in produced_unit_list:
head_list.append(unit["head"])
produced_file_list.extend(unit["body"])
for file_name in previous_file_list:
if file_name not in produced_file_list:
dust_file_path = os.path.normpath(os.path.join(test_dir, file_name))
Ui.laconic("clean dust file: " + file_name)
dust_file_fullpath = os.path.realpath(dust_file_path)
if not os.path.isfile(dust_file_fullpath):
# if you're there, it's because you are debugging a crash
continue
FileSystem.cleanRemoveFile(dust_file_fullpath)
paktrace_dir = Default.getPakTraceDir(test_dir)
if os.path.isdir(paktrace_dir):
logging.debug("look for dust in directory: " + paktrace_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
dir_name = os.path.relpath(dir_name, test_dir)
logging.debug("found paktrace dir: " + dir_name)
for file_name in file_name_list:
file_path = os.path.join(dir_name, file_name)
file_path = os.path.normpath(file_path)
relative_paktrace_dir = os.path.relpath(paktrace_dir, test_dir)
trace_file = os.path.relpath(file_path, relative_paktrace_dir)
head_name=trace_file[:-len(Default.paktrace_file_ext)]
if head_name not in head_list:
Ui.print("clean dust paktrace: " + file_path)
dust_paktrace_path = os.path.normpath(os.path.join(test_dir, file_path))
dust_paktrace_fullpath = os.path.realpath(dust_paktrace_path)
FileSystem.cleanRemoveFile(dust_paktrace_fullpath) | # committed file is kept.
if os.path.exists(full_path):
# FIXME: getmtime reads realpath datetime, not symbolic link datetime.
file_date_time = (datetime.fromtimestamp(os.path.getmtime(full_path))) | random_line_split |
Pak.py | #! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
from Urcheon import Action
from Urcheon import Default
from Urcheon import FileSystem
from Urcheon import Game
from Urcheon import MapCompiler
from Urcheon import Parallelism
from Urcheon import Repository
from Urcheon import Ui
import __main__ as m
import argparse
import logging
import os
import sys
import tempfile
import time
import zipfile
from collections import OrderedDict
from datetime import datetime
from operator import attrgetter
class MultiRunner():
def __init__(self, source_dir_list, args):
self.source_dir_list = source_dir_list
self.args = args
self.runner_dict = {
"prepare": Builder,
"build": Builder,
"package": Packager,
}
def run(self):
cpu_count = Parallelism.countCPU()
runner_thread_list = []
for source_dir in self.source_dir_list:
# FIXME: because of this code Urcheon must run within package set directory
Ui.notice(self.args.stage_name + " from: " + source_dir)
source_dir = os.path.realpath(source_dir)
source_tree = Repository.Tree(source_dir, game_name=self.args.game_name)
runner = self.runner_dict[self.args.stage_name](source_tree, self.args)
if self.args.no_parallel:
runner.run()
else:
runner_thread = Parallelism.Thread(target=runner.run)
runner_thread_list.append(runner_thread)
while len(runner_thread_list) > cpu_count:
# join dead thread early to raise thread exceptions early
# forget ended threads
runner_thread_list = Parallelism.joinDeadThreads(runner_thread_list)
runner_thread.start()
# wait for all remaining threads ending
Parallelism.joinThreads(runner_thread_list)
class Builder():
def __init__(self, source_tree, args, is_nested=False, disabled_action_list=[], file_list=[]):
self.source_tree = source_tree
self.source_dir = source_tree.dir
self.pak_name = source_tree.pak_name
self.pak_format = source_tree.pak_format
self.game_name = source_tree.game_name
self.is_nested = is_nested
self.stage_name = args.stage_name
if is_nested:
self.keep_dust = False
else:
self.keep_dust = args.keep_dust
action_list = Action.List(source_tree, self.stage_name, disabled_action_list=disabled_action_list)
if self.stage_name == "prepare":
self.test_dir = self.source_dir
self.since_reference = None
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
# FIXME: currently the prepare stage
# can't be parallel (for example SlothRun task
# needs all PrevRun tasks to be finished first)
# btw all packages can be prepared in parallel
self.is_parallel = False
else:
if is_nested:
self.test_dir = args.test_dir
else:
self.test_dir = source_tree.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
if is_nested:
self.since_reference = False
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
self.is_parallel = not args.no_parallel
else:
self.since_reference = args.since_reference
self.no_auto_actions = args.no_auto_actions
self.clean_map = args.clean_map
self.map_profile = args.map_profile
self.is_parallel = not args.no_parallel
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(self.source_tree, self.test_dir, self.stage_name)
self.deps = Repository.Deps(self.source_tree, self.test_dir)
if not is_nested:
if self.pak_format == "dpk":
deleted_action_list = self.deleted.getActions()
action_list.readActions(action_list=deleted_action_list)
action_list.readActions()
if not file_list:
# FIXME: only if one package?
# same reference for multiple packages
# makes sense when using tags
# NOTE: already prepared file can be seen as source again, but there may be no easy way to solve it
if self.since_reference:
file_repo = Repository.Git(self.source_dir, self.pak_format)
file_list = file_repo.listFilesSinceReference(self.since_reference)
# also look for untracked files
untracked_file_list = file_repo.listUntrackedFiles()
for file_name in untracked_file_list:
if file_name not in file_list:
logging.debug("found untracked file “" + file_name + "”")
# FIXME: next loop will look for prepared files for it, which makes no sense,
# is it harmful?
file_list.append(file_name)
# also look for files produced with “prepare” command
# from files modified since this reference
paktrace = Repository.Paktrace(source_tree, self.source_dir)
input_file_dict = paktrace.getFileDict()["input"]
for file_path in file_list:
logging.debug("looking for prepared files for “" + str(file_path) + "”")
logging.debug("looking for prepared files for “" + file_path + "”")
if file_path in input_file_dict.keys():
for input_file_path in input_file_dict[file_path]:
if not os.path.exists(os.path.join(self.source_dir, input_file_path)):
logging.debug("missing prepared files for “" + file_path + "”: " + input_file_path)
else:
logging.debug("found prepared files for “" + file_path + "”: " + input_file_path)
file_list.append(input_file_path)
else:
file_list = source_tree. | actions:
action_list.computeActions(file_list)
self.action_list = action_list
self.game_profile = Game.Game(source_tree)
if not self.map_profile:
map_config = MapCompiler.Config(source_tree)
self.map_profile = map_config.requireDefaultProfile()
def run(self):
if self.source_dir == self.test_dir:
Ui.print("Preparing: " + self.source_dir)
else:
Ui.print("Building “" + self.source_dir + "” as: " + self.test_dir)
# TODO: check if not a directory
if os.path.isdir(self.test_dir):
logging.debug("found build dir: " + self.test_dir)
else:
logging.debug("create build dir: " + self.test_dir)
os.makedirs(self.test_dir, exist_ok=True)
if not self.is_nested and not self.keep_dust:
clean_dust = True
else:
clean_dust = False
if clean_dust:
# do not read paktrace from temporary directories
# do not read paktrace if dust will be kept
paktrace = Repository.Paktrace(self.source_tree, self.test_dir)
previous_file_list = paktrace.listAll()
if self.clean_map or clean_dust:
cleaner = Cleaner(self.source_tree)
if self.clean_map:
cleaner.cleanMap(self.test_dir)
cpu_count = Parallelism.countCPU()
action_thread_list = []
produced_unit_list = []
main_process = Parallelism.getProcess()
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
# no need to use multiprocessing module to manage task contention, since each task will call its own process
# using threads on one core is faster, and it does not prevent tasks to be able to use other cores
# the is_nested argument is there to tell action to not do specific stuff because of recursion
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, map_profile=self.map_profile, is_nested=self.is_nested)
# check if task is already done (usually comparing timestamps the make way)
if action.isDone():
produced_unit_list.extend(action.getOldProducedUnitList())
continue
if not self.is_parallel or not action_type.is_parallel:
# tasks are run sequentially but they can
# use multiple threads themselves
thread_count = cpu_count
else:
# this compute is super slow because of process.children()
child_thread_count = Parallelism.countChildThread(main_process)
thread_count = max(1, cpu_count - child_thread_count)
action.thread_count = thread_count
if not self.is_parallel or not action_type.is_parallel:
# sequential build explicitely requested (like in recursion)
# or action that can't be run concurrently to others (like MergeBsp)
produced_unit_list.extend(action.run())
else:
# do not use >= in case of there is some extra thread we don't think about
# it's better to spawn an extra one than looping forever
while child_thread_count > cpu_count:
# no need to loop at full cpu speed
time.sleep(.05)
child_thread_count = Parallelism.countChildThread(main_process)
pass
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
action.thread_count = max(2, cpu_count - child_thread_count)
# wrapper does: produced_unit_list.extend(action.run())
action_thread = Parallelism.Thread(target=self.threadExtendRes, args=(action.run, (), produced_unit_list))
action_thread_list.append(action_thread)
action_thread.start()
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
# wait for all threads to end, otherwise it will start packaging next
# package while the building task for the current one is not ended
# and well, we now have to read that list to purge old files, so we
# must wait
Parallelism.joinThreads(action_thread_list)
# Handle symbolic links.
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, action_list=self.action_list, map_profile=self.map_profile, is_nested=self.is_nested)
# TODO: check for symbolic link to missing or deleted files.
produced_unit_list.extend(action.symlink())
# deduplication
unit_list = []
deleted_file_list = []
produced_file_list = []
for unit in produced_unit_list:
if unit == []:
continue
logging.debug("unit: " + str(unit))
head = unit["head"]
body = unit["body"]
action = unit["action"]
if action == "ignore":
continue
if action == "delete":
deleted_file_list.append( head )
if head not in produced_file_list:
produced_file_list.append(head)
for part in body:
if part not in produced_file_list:
# FIXME: only if action was not “ignore”
produced_file_list.append(part)
# if multiple calls produce the same files (like merge_bsp)
# FIXME: that can't work, this is probably a leftover
# or we may have to do “if head in body” instead.
# See https://github.com/DaemonEngine/Urcheon/issues/48
if head in unit:
continue
unit_list.append(unit)
produced_unit_list = unit_list
if self.stage_name == "build" and not self.is_nested:
if self.pak_format == "dpk":
is_deleted = False
if self.since_reference:
Ui.laconic("looking for deleted files")
# Unvanquished game did not support DELETED file until after 0.52.1.
workaround_no_delete = self.source_tree.game_name == "unvanquished" and self.since_reference in ["unvanquished/0.52.1", "v0.52.1"]
git_repo = Repository.Git(self.source_dir, "dpk", workaround_no_delete=workaround_no_delete)
previous_version = git_repo.computeVersion(self.since_reference, named_reference=True)
self.deps.set(self.pak_name, previous_version)
for deleted_file in git_repo.getDeletedFileList(self.since_reference):
if deleted_file not in deleted_file_list:
is_deleted = True
deleted_file_list.append(deleted_file)
if deleted_file_list:
is_deleted = True
for deleted_file in deleted_file_list:
self.deleted.set(self.pak_name, deleted_file)
if self.deleted.read():
is_deleted = True
if is_deleted:
deleted_part_list = self.deleted.translate()
# TODO: No need to mark as DELETED a file from the same
# package if it does not depend on itself.
# TODO: A way to not translate DELETED files may be needed
# in some cases.
# If flamer.jpg producing flamer.crn was replaced
# by flamer.png also producing flamer.crn, the
# flamer.crn file will be listed as deleted
# while it will be shipped, but built from another
# source file, so we must check deleted files
# aren't built in other way to avoid listing
# as deleted a file that is actually shipped.
for deleted_part_dict in deleted_part_list:
is_built = False
if deleted_part_dict["pak_name"] == self.pak_name:
deleted_part = deleted_part_dict["file_path"]
if deleted_part.startswith(Default.repository_config_dir + os.path.sep):
continue
if deleted_part.startswith(Default.legacy_pakinfo_dir + os.path.sep):
continue
if deleted_part in produced_file_list:
is_built = True
Ui.laconic(deleted_part + ": do nothing because it is produced by another source file.")
self.deleted.removePart(self.pak_name, deleted_part)
if not is_built:
Ui.laconic(deleted_part + ": will mark as deleted.")
# Writing DELETED file.
for deleted_part in deleted_part_list:
self.deleted.set(self.source_tree.pak_name, deleted_part)
is_deleted = self.deleted.write()
if is_deleted:
unit = {
"head": "DELETED",
"body": [ "DELETED" ],
}
produced_unit_list.append(unit)
else:
# Remove DELETED leftover from partial build.
self.deps.remove(self.test_dir)
is_deps = False
# add itself to DEPS if partial build,
# also look for deleted files
if self.since_reference:
is_deps = True
if self.deps.read():
is_deps = True
if is_deps:
# translating DEPS file
self.deps.translateTest()
self.deps.write()
unit = {
"head": "DEPS",
"body": [ "DEPS" ],
}
produced_unit_list.append(unit)
else:
# Remove DEPS leftover from partial build.
self.deps.remove(self.test_dir)
logging.debug("produced unit list:" + str(produced_unit_list))
# do not clean-up if building from temporary directories
# or if user asked to not clean-up
if clean_dust:
cleaner.cleanDust(self.test_dir, produced_unit_list, previous_file_list)
return produced_unit_list
def threadExtendRes(self, func, args, res):
# magic: only works if res is a mutable object (like a list)
res.extend(func(*args))
class Packager():
# TODO: reuse paktraces, do not walk for file,s
def __init__(self, source_tree, args):
self.source_dir = source_tree.dir
self.pak_vfs = source_tree.pak_vfs
self.pak_config = source_tree.pak_config
self.pak_format = source_tree.pak_format
self.allow_dirty = args.allow_dirty
self.no_compress = args.no_compress
self.test_dir = self.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
self.pak_file = self.pak_config.getPakFile(build_prefix=args.build_prefix, pak_prefix=args.pak_prefix, pak_file=args.pak_file, version_suffix=args.version_suffix)
self.game_profile = Game.Game(source_tree)
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(source_tree, self.test_dir, None)
self.deps = Repository.Deps(source_tree, self.test_dir)
def createSubdirs(self, pak_file):
pak_subdir = os.path.dirname(pak_file)
if pak_subdir == "":
pak_subdir = "."
if os.path.isdir(pak_subdir):
logging.debug("found pak subdir: " + pak_subdir)
else:
logging.debug("create pak subdir: " + pak_subdir)
os.makedirs(pak_subdir, exist_ok=True)
def run(self):
if not os.path.isdir(self.test_dir):
Ui.error("test pakdir not built: " + self.test_dir)
source_repository = Repository.Git(self.source_dir, self.pak_format)
if source_repository.isGit() and source_repository.isDirty():
if self.allow_dirty:
Ui.warning("Dirty repository: " + self.source_dir)
else:
Ui.error("Dirty repository isn't allowed to be packaged (use --allow-dirty to override): " + self.source_dir)
Ui.print("Packaging “" + self.test_dir + "” as: " + self.pak_file)
self.createSubdirs(self.pak_file)
logging.debug("opening: " + self.pak_file)
# remove existing file (do not write in place) to force the game engine to reread the file
if os.path.isfile(self.pak_file):
logging.debug("remove existing package: " + self.pak_file)
os.remove(self.pak_file)
if self.no_compress:
# why zlib.Z_NO_COMPRESSION not defined?
zipfile.zlib.Z_DEFAULT_COMPRESSION = 0
else:
# maximum compression
zipfile.zlib.Z_DEFAULT_COMPRESSION = zipfile.zlib.Z_BEST_COMPRESSION
found_file = False
paktrace_dir = Default.getPakTraceDir(self.test_dir)
relative_paktrace_dir = os.path.relpath(paktrace_dir, self.test_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
for file_name in file_name_list:
found_file = True
break
if found_file:
break
# FIXME: if only the DEPS file is modified, the package will
# not be created (it should be).
if not found_file:
Ui.print("Not writing empty package: " + self.pak_file)
return
pak = zipfile.ZipFile(self.pak_file, "w", zipfile.ZIP_DEFLATED)
for dir_name, subdir_name_list, file_name_list in os.walk(self.test_dir):
for file_name in file_name_list:
rel_dir_name = os.path.relpath(dir_name, self.test_dir)
full_path = os.path.join(dir_name, file_name)
file_path = os.path.relpath(full_path, self.test_dir)
# ignore paktrace files
if file_path.startswith(relative_paktrace_dir + os.path.sep):
continue
# ignore DELETED and DEPS file, will add it later
if self.pak_format == "dpk" and file_path in Repository.dpk_special_files:
continue
found_file = True
# TODO: add a mechanism to know if VFS supports
# symbolic links in packages or not.
# Dæmon's DPK VFS is supporting symbolic links.
# DarkPlaces' PK3 VFS is supporting symbolic links.
# Others may not.
is_symlink_supported = True
if is_symlink_supported and os.path.islink(full_path):
Ui.print("add symlink to package " + os.path.basename(self.pak_file) + ": " + file_path)
# TODO: Remove this test when Urcheon deletes extra
# files in build directory. Currently a deleted but not
# committed file is kept.
if os.path.exists(full_path):
# FIXME: getmtime reads realpath datetime, not symbolic link datetime.
file_date_time = (datetime.fromtimestamp(os.path.getmtime(full_path)))
# See https://stackoverflow.com/a/61795576/9131399
attrs = ('year', 'month', 'day', 'hour', 'minute', 'second')
file_date_time_tuple = attrgetter(*attrs)(file_date_time)
# See https://stackoverflow.com/a/60691331/9131399
zip_info = zipfile.ZipInfo(file_path, date_time=file_date_time_tuple)
zip_info.create_system = 3
file_permissions = 0o777
file_permissions |= 0xA000
zip_info.external_attr = file_permissions << 16
target_path = os.readlink(full_path)
pak.writestr(zip_info, target_path)
else:
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": " + file_path)
pak.write(full_path, arcname=file_path)
if self.pak_format == "dpk":
# Writing DELETED file.
deleted_file_path = self.deleted.get_test_path()
if os.path.isfile(deleted_file_path):
pak.write(deleted_file_path, arcname="DELETED")
# Translating DEPS file.
if self.deps.read(deps_dir=self.test_dir):
self.deps.translateRelease(self.pak_vfs)
deps_temp_dir = tempfile.mkdtemp()
deps_temp_file = self.deps.write(deps_dir=deps_temp_dir)
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": DEPS")
pak.write(deps_temp_file, arcname="DEPS")
logging.debug("close: " + self.pak_file)
pak.close()
if source_repository.isGit():
repo_date = int(source_repository.getDate("HEAD"))
os.utime(self.pak_file, (repo_date, repo_date))
Ui.laconic("Package written: " + self.pak_file)
class Cleaner():
def __init__(self, source_tree):
self.pak_name = source_tree.pak_name
self.game_profile = Game.Game(source_tree)
def cleanTest(self, test_dir):
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
that_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + that_file)
os.remove(that_file)
FileSystem.removeEmptyDir(dir_name)
for dir_name in subdir_name_list:
that_dir = dir_name + os.path.sep + dir_name
FileSystem.removeEmptyDir(that_dir)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanPak(self, pak_prefix):
for dir_name, subdir_name_list, file_name_list in os.walk(pak_prefix):
for file_name in file_name_list:
if file_name.startswith(self.pak_name) and file_name.endswith(self.game_profile.pak_ext):
pak_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + pak_file)
os.remove(pak_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(pak_prefix)
def cleanMap(self, test_dir):
# TODO: use paktrace abilities?
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "bsp"):
bsp_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + bsp_file)
os.remove(bsp_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "map"):
map_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + map_file)
os.remove(map_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-2:-1] == ["maps"] and file_name.startswith("lm_"):
lightmap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + lightmap_file)
os.remove(lightmap_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "navMesh"):
navmesh_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + navmesh_file)
os.remove(navmesh_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["minimaps"]:
minimap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + minimap_file)
os.remove(minimap_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanDust(self, test_dir, produced_unit_list, previous_file_list):
# TODO: remove extra files that are not tracked in paktraces?
# FIXME: reuse produced_file_list from build()
produced_file_list = []
head_list = []
for unit in produced_unit_list:
head_list.append(unit["head"])
produced_file_list.extend(unit["body"])
for file_name in previous_file_list:
if file_name not in produced_file_list:
dust_file_path = os.path.normpath(os.path.join(test_dir, file_name))
Ui.laconic("clean dust file: " + file_name)
dust_file_fullpath = os.path.realpath(dust_file_path)
if not os.path.isfile(dust_file_fullpath):
# if you're there, it's because you are debugging a crash
continue
FileSystem.cleanRemoveFile(dust_file_fullpath)
paktrace_dir = Default.getPakTraceDir(test_dir)
if os.path.isdir(paktrace_dir):
logging.debug("look for dust in directory: " + paktrace_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
dir_name = os.path.relpath(dir_name, test_dir)
logging.debug("found paktrace dir: " + dir_name)
for file_name in file_name_list:
file_path = os.path.join(dir_name, file_name)
file_path = os.path.normpath(file_path)
relative_paktrace_dir = os.path.relpath(paktrace_dir, test_dir)
trace_file = os.path.relpath(file_path, relative_paktrace_dir)
head_name=trace_file[:-len(Default.paktrace_file_ext)]
if head_name not in head_list:
Ui.print("clean dust paktrace: " + file_path)
dust_paktrace_path = os.path.normpath(os.path.join(test_dir, file_path))
dust_paktrace_fullpath = os.path.realpath(dust_paktrace_path)
FileSystem.cleanRemoveFile(dust_paktrace_fullpath)
| listFiles()
if not self.no_auto_ | conditional_block |
Pak.py | #! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
from Urcheon import Action
from Urcheon import Default
from Urcheon import FileSystem
from Urcheon import Game
from Urcheon import MapCompiler
from Urcheon import Parallelism
from Urcheon import Repository
from Urcheon import Ui
import __main__ as m
import argparse
import logging
import os
import sys
import tempfile
import time
import zipfile
from collections import OrderedDict
from datetime import datetime
from operator import attrgetter
class MultiRunner():
def __init__(self, source_dir_list, args):
self.source_dir_list = source_dir_list
self.args = args
self.runner_dict = {
"prepare": Builder,
"build": Builder,
"package": Packager,
}
def run(self):
cpu_count = Parallelism.countCPU()
runner_thread_list = []
for source_dir in self.source_dir_list:
# FIXME: because of this code Urcheon must run within package set directory
Ui.notice(self.args.stage_name + " from: " + source_dir)
source_dir = os.path.realpath(source_dir)
source_tree = Repository.Tree(source_dir, game_name=self.args.game_name)
runner = self.runner_dict[self.args.stage_name](source_tree, self.args)
if self.args.no_parallel:
runner.run()
else:
runner_thread = Parallelism.Thread(target=runner.run)
runner_thread_list.append(runner_thread)
while len(runner_thread_list) > cpu_count:
# join dead thread early to raise thread exceptions early
# forget ended threads
runner_thread_list = Parallelism.joinDeadThreads(runner_thread_list)
runner_thread.start()
# wait for all remaining threads ending
Parallelism.joinThreads(runner_thread_list)
class Builder():
def __init__(self, source_tree, args, is_nested=False, disabled_action_list=[], file_list=[]):
self.source_tree = source_tree
self.source_dir = source_tree.dir
self.pak_name = source_tree.pak_name
self.pak_format = source_tree.pak_format
self.game_name = source_tree.game_name
self.is_nested = is_nested
self.stage_name = args.stage_name
if is_nested:
self.keep_dust = False
else:
self.keep_dust = args.keep_dust
action_list = Action.List(source_tree, self.stage_name, disabled_action_list=disabled_action_list)
if self.stage_name == "prepare":
self.test_dir = self.source_dir
self.since_reference = None
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
# FIXME: currently the prepare stage
# can't be parallel (for example SlothRun task
# needs all PrevRun tasks to be finished first)
# btw all packages can be prepared in parallel
self.is_parallel = False
else:
if is_nested:
self.test_dir = args.test_dir
else:
self.test_dir = source_tree.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
if is_nested:
self.since_reference = False
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
self.is_parallel = not args.no_parallel
else:
self.since_reference = args.since_reference
self.no_auto_actions = args.no_auto_actions
self.clean_map = args.clean_map
self.map_profile = args.map_profile
self.is_parallel = not args.no_parallel
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(self.source_tree, self.test_dir, self.stage_name)
self.deps = Repository.Deps(self.source_tree, self.test_dir)
if not is_nested:
if self.pak_format == "dpk":
deleted_action_list = self.deleted.getActions()
action_list.readActions(action_list=deleted_action_list)
action_list.readActions()
if not file_list:
# FIXME: only if one package?
# same reference for multiple packages
# makes sense when using tags
# NOTE: already prepared file can be seen as source again, but there may be no easy way to solve it
if self.since_reference:
file_repo = Repository.Git(self.source_dir, self.pak_format)
file_list = file_repo.listFilesSinceReference(self.since_reference)
# also look for untracked files
untracked_file_list = file_repo.listUntrackedFiles()
for file_name in untracked_file_list:
if file_name not in file_list:
logging.debug("found untracked file “" + file_name + "”")
# FIXME: next loop will look for prepared files for it, which makes no sense,
# is it harmful?
file_list.append(file_name)
# also look for files produced with “prepare” command
# from files modified since this reference
paktrace = Repository.Paktrace(source_tree, self.source_dir)
input_file_dict = paktrace.getFileDict()["input"]
for file_path in file_list:
logging.debug("looking for prepared files for “" + str(file_path) + "”")
logging.debug("looking for prepared files for “" + file_path + "”")
if file_path in input_file_dict.keys():
for input_file_path in input_file_dict[file_path]:
if not os.path.exists(os.path.join(self.source_dir, input_file_path)):
logging.debug("missing prepared files for “" + file_path + "”: " + input_file_path)
else:
logging.debug("found prepared files for “" + file_path + "”: " + input_file_path)
file_list.append(input_file_path)
else:
file_list = source_tree.listFiles()
if not self.no_auto_actions:
action_list.computeActions(file_list)
self.action_list = action_list
self.game_profile = Game.Game(source_tree)
if not self.map_profile:
map_config = MapCompiler.Config(source_tree)
self.map_profile = map_config.requireDefaultProfile()
def run(self):
if self.source_dir == self.test_dir:
Ui.print("Preparing: " + self.source_dir)
else:
Ui.print("Building “" + self.source_dir + "” as: " + self.test_dir)
# TODO: check if not a directory
if os.path.isdir(self.test_dir):
logging.debug("found build dir: " + self.test_dir)
else:
logging.debug("create build dir: " + self.test_dir)
os.makedirs(self.test_dir, exist_ok=True)
if not self.is_nested and not self.keep_dust:
clean_dust = True
else:
clean_dust = False
if clean_dust:
# do not read paktrace from temporary directories
# do not read paktrace if dust will be kept
paktrace = Repository.Paktrace(self.source_tree, self.test_dir)
previous_file_list = paktrace.listAll()
if self.clean_map or clean_dust:
cleaner = Cleaner(self.source_tree)
if self.clean_map:
cleaner.cleanMap(self.test_dir)
cpu_count = Parallelism.countCPU()
action_thread_list = []
produced_unit_list = []
main_process = Parallelism.getProcess()
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
# no need to use multiprocessing module to manage task contention, since each task will call its own process
# using threads on one core is faster, and it does not prevent tasks to be able to use other cores
# the is_nested argument is there to tell action to not do specific stuff because of recursion
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, map_profile=self.map_profile, is_nested=self.is_nested)
# check if task is already done (usually comparing timestamps the make way)
if action.isDone():
produced_unit_list.extend(action.getOldProducedUnitList())
continue
if not self.is_parallel or not action_type.is_parallel:
# tasks are run sequentially but they can
# use multiple threads themselves
thread_count = cpu_count
else:
# this compute is super slow because of process.children()
child_thread_count = Parallelism.countChildThread(main_process)
thread_count = max(1, cpu_count - child_thread_count)
action.thread_count = thread_count
if not self.is_parallel or not action_type.is_parallel:
# sequential build explicitely requested (like in recursion)
# or action that can't be run concurrently to others (like MergeBsp)
produced_unit_list.extend(action.run())
else:
# do not use >= in case of there is some extra thread we don't think about
# it's better to spawn an extra one than looping forever
while child_thread_count > cpu_count:
# no need to loop at full cpu speed
time.sleep(.05)
child_thread_count = Parallelism.countChildThread(main_process)
pass
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
action.thread_count = max(2, cpu_count - child_thread_count)
# wrapper does: produced_unit_list.extend(action.run())
action_thread = Parallelism.Thread(target=self.threadExtendRes, args=(action.run, (), produced_unit_list))
action_thread_list.append(action_thread)
action_thread.start()
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
# wait for all threads to end, otherwise it will start packaging next
# package while the building task for the current one is not ended
# and well, we now have to read that list to purge old files, so we
# must wait
Parallelism.joinThreads(action_thread_list)
# Handle symbolic links.
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, action_list=self.action_list, map_profile=self.map_profile, is_nested=self.is_nested)
# TODO: check for symbolic link to missing or deleted files.
produced_unit_list.extend(action.symlink())
# deduplication
unit_list = []
deleted_file_list = []
produced_file_list = []
for unit in produced_unit_list:
if unit == []:
continue
logging.debug("unit: " + str(unit))
head = unit["head"]
body = unit["body"]
action = unit["action"]
if action == "ignore":
continue
if action == "delete":
deleted_file_list.append( head )
if head not in produced_file_list:
produced_file_list.append(head)
for part in body:
if part not in produced_file_list:
# FIXME: only if action was not “ignore”
produced_file_list.append(part)
# if multiple calls produce the same files (like merge_bsp)
# FIXME: that can't work, this is probably a leftover
# or we may have to do “if head in body” instead.
# See https://github.com/DaemonEngine/Urcheon/issues/48
if head in unit:
continue
unit_list.append(unit)
produced_unit_list = unit_list
if self.stage_name == "build" and not self.is_nested:
if self.pak_format == "dpk":
is_deleted = False
if self.since_reference:
Ui.laconic("looking for deleted files")
# Unvanquished game did not support DELETED file until after 0.52.1.
workaround_no_delete = self.source_tree.game_name == "unvanquished" and self.since_reference in ["unvanquished/0.52.1", "v0.52.1"]
git_repo = Repository.Git(self.source_dir, "dpk", workaround_no_delete=workaround_no_delete)
previous_version = git_repo.computeVersion(self.since_reference, named_reference=True)
self.deps.set(self.pak_name, previous_version)
for deleted_file in git_repo.getDeletedFileList(self.since_reference):
if deleted_file not in deleted_file_list:
is_deleted = True
deleted_file_list.append(deleted_file)
if deleted_file_list:
is_deleted = True
for deleted_file in deleted_file_list:
self.deleted.set(self.pak_name, deleted_file)
if self.deleted.read():
is_deleted = True
if is_deleted:
deleted_part_list = self.deleted.translate()
# TODO: No need to mark as DELETED a file from the same
# package if it does not depend on itself.
# TODO: A way to not translate DELETED files may be needed
# in some cases.
# If flamer.jpg producing flamer.crn was replaced
# by flamer.png also producing flamer.crn, the
# flamer.crn file will be listed as deleted
# while it will be shipped, but built from another
# source file, so we must check deleted files
# aren't built in other way to avoid listing
# as deleted a file that is actually shipped.
for deleted_part_dict in deleted_part_list:
is_built = False
if deleted_part_dict["pak_name"] == self.pak_name:
deleted_part = deleted_part_dict["file_path"]
if deleted_part.startswith(Default.repository_config_dir + os.path.sep):
continue
if deleted_part.startswith(Default.legacy_pakinfo_dir + os.path.sep):
continue
if deleted_part in produced_file_list:
is_built = True
Ui.laconic(deleted_part + ": do nothing because it is produced by another source file.")
self.deleted.removePart(self.pak_name, deleted_part)
if not is_built:
Ui.laconic(deleted_part + ": will mark as deleted.")
# Writing DELETED file.
for deleted_part in deleted_part_list:
self.deleted.set(self.source_tree.pak_name, deleted_part)
is_deleted = self.deleted.write()
if is_deleted:
unit = {
"head": "DELETED",
"body": [ "DELETED" ],
}
produced_unit_list.append(unit)
else:
# Remove DELETED leftover from partial build.
self.deps.remove(self.test_dir)
is_deps = False
# add itself to DEPS if partial build,
# also look for deleted files
if self.since_reference:
is_deps = True
if self.deps.read():
is_deps = True
if is_deps:
# translating DEPS file
self.deps.translateTest()
self.deps.write()
unit = {
"head": "DEPS",
"body": [ "DEPS" ],
}
produced_unit_list.append(unit)
else:
# Remove DEPS leftover from partial build.
self.deps.remove(self.test_dir)
logging.debug("produced unit list:" + str(produced_unit_list))
# do not clean-up if building from temporary directories
# or if user asked to not clean-up
if clean_dust:
cleaner.cleanDust(self.test_dir, produced_unit_list, previous_file_list)
return produced_unit_list
def threadExtendRes(self, func, args, res):
# magic: only works if res is a mutable object (like a list)
res.extend(func(*args))
class Packager():
# TODO: reuse paktraces, do not walk for file,s
def __init__(self, source_tree, args):
self.source_dir = source_tree.dir
self.pak_vfs = source_tree.pak_vfs
self.pak_config = source_tree.pak_config
self.pak_format = source_tree.pak_format
self.allow_dirty = args.allow_dirty
self.no_compress = args.no_compress
self.test_dir = self.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
self.pak_file = self.pak_config.getPakFile(build_prefix=args.build_prefix, pak_prefix=args.pak_prefix, pak_file=args.pak_file, version_suffix=args.version_suffix)
self.game_profile = Game.Game(source_tree)
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(source_tree, self.test_dir, None)
self.deps = Repository.Deps(source_tree, self.test_dir)
def createSubdirs(self, pak_file):
pak_subdir = os.path.dirname(pak_file)
if pak_subdir == "":
pak_subdir = "."
if os.path.isdir(pak_subdir):
logging.debug("found pak subdir: " + pak_subdir)
else:
logging.debug("create pak subdir: " + pak_subdir)
os.makedirs(pak_subdir, exist_ok=True)
def run(self):
if not os.path.isdir(self.test_dir):
Ui.error("test pakdir not built: " + self.test_dir)
source_repository = Repository.Git(self.source_dir, self.pak_format)
if source_repository.isGit() and source_repository.isDirty():
if self.allow_dirty:
Ui.warning("Dirty repository: " + self.source_dir)
else:
Ui.error("Dirty repository isn't allowed to be packaged (use --allow-dirty to override): " + self.source_dir)
Ui.print("Packaging “" + self.test_dir + "” as: " + self.pak_file)
self.createSubdirs(self.pak_file)
logging.debug("opening: " + self.pak_file)
# remove existing file (do not write in place) to force the game engine to reread the file
if os.path.isfile(self.pak_file):
logging.debug("remove existing package: " + self.pak_file)
os.remove(self.pak_file)
if self.no_compress:
# why zlib.Z_NO_COMPRESSION not defined?
zipfile.zlib.Z_DEFAULT_COMPRESSION = 0
else:
# maximum compression
zipfile.zlib.Z_DEFAULT_COMPRESSION = zipfile.zlib.Z_BEST_COMPRESSION
found_file = False
paktrace_dir = Default.getPakTraceDir(self.test_dir)
relative_paktrace_dir = os.path.relpath(paktrace_dir, self.test_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
for file_name in file_name_list:
found_file = True
break
if found_file:
break
# FIXME: if only the DEPS file is modified, the package will
# not be created (it should be).
if not found_file:
Ui.print("Not writing empty package: " + self.pak_file)
return
pak = zipfile.ZipFile(self.pak_file, "w", zipfile.ZIP_DEFLATED)
for dir_name, subdir_name_list, file_name_list in os.walk(self.test_dir):
for file_name in file_name_list:
rel_dir_name = os.path.relpath(dir_name, self.test_dir)
full_path = os.path.join(dir_name, file_name)
file_path = os.path.relpath(full_path, self.test_dir)
# ignore paktrace files
if file_path.startswith(relative_paktrace_dir + os.path.sep):
continue
# ignore DELETED and DEPS file, will add it later
if self.pak_format == "dpk" and file_path in Repository.dpk_special_files:
continue
found_file = True
# TODO: add a mechanism to know if VFS supports
# symbolic links in packages or not.
# Dæmon's DPK VFS is supporting symbolic links.
# DarkPlaces' PK3 VFS is supporting symbolic links.
# Others may not.
is_symlink_supported = True
if is_symlink_supported and os.path.islink(full_path):
Ui.print("add symlink to package " + os.path.basename(self.pak_file) + ": " + file_path)
# TODO: Remove this test when Urcheon deletes extra
# files in build directory. Currently a deleted but not
# committed file is kept.
if os.path.exists(full_path):
# FIXME: getmtime reads realpath datetime, not symbolic link datetime.
file_date_time = (datetime.fromtimestamp(os.path.getmtime(full_path)))
# See https://stackoverflow.com/a/61795576/9131399
attrs = ('year', 'month', 'day', 'hour', 'minute', 'second')
file_date_time_tuple = attrgetter(*attrs)(file_date_time)
# See https://stackoverflow.com/a/60691331/9131399
zip_info = zipfile.ZipInfo(file_path, date_time=file_date_time_tuple)
zip_info.create_system = 3
file_permissions = 0o777
file_permissions |= 0xA000
zip_info.external_attr = file_permissions << 16
target_path = os.readlink(full_path)
pak.writestr(zip_info, target_path)
else:
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": " + file_path)
pak.write(full_path, arcname=file_path)
if self.pak_format == "dpk":
# Writing DELETED file.
deleted_file_path = self.deleted.get_test_path()
if os.path.isfile(deleted_file_path):
pak.write(deleted_file_path, arcname="DELETED")
# Translating DEPS file.
if self.deps.read(deps_dir=self.test_dir):
self.deps.translateRelease(self.pak_vfs)
deps_temp_dir = tempfile.mkdtemp()
deps_temp_file = self.deps.write(deps_dir=deps_temp_dir)
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": DEPS")
pak.write(deps_temp_file, arcname="DEPS")
logging.debug("close: " + self.pak_file)
pak.close()
if source_repository.isGit():
repo_date = int(source_repository.getDate("HEAD"))
os.utime(self.pak_file, (repo_date, repo_date))
Ui.laconic("Package written: " + self.pak_file)
class Cleaner():
def __init__(self, source_tree):
self.pak_name = source_tree.pak_name
self.game_profile = Game.Game(source_tree)
def cleanTest(self, test_dir):
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
that_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + that_file)
os.remove(that_file)
FileSystem.removeEmptyDir(dir_name)
for dir_name in subdir_name_list:
that_dir = dir_name + os.path.sep + dir_name
FileSystem.removeEmptyDir(that_dir)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanPak(self, pak_prefix):
for dir_name, subdir_name_list, file_name_list in os.walk(pak_prefix):
for file_name in file_name_list:
if file_name.startswith(self.pak_name) and file_name.endswith(self.game_profile.pak_ext):
pak_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + pak_file)
os.remove(pak_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(pak_prefix)
def cleanMap(self, test_dir):
# TODO: use p | abilities?
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "bsp"):
bsp_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + bsp_file)
os.remove(bsp_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "map"):
map_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + map_file)
os.remove(map_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-2:-1] == ["maps"] and file_name.startswith("lm_"):
lightmap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + lightmap_file)
os.remove(lightmap_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "navMesh"):
navmesh_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + navmesh_file)
os.remove(navmesh_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["minimaps"]:
minimap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + minimap_file)
os.remove(minimap_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanDust(self, test_dir, produced_unit_list, previous_file_list):
# TODO: remove extra files that are not tracked in paktraces?
# FIXME: reuse produced_file_list from build()
produced_file_list = []
head_list = []
for unit in produced_unit_list:
head_list.append(unit["head"])
produced_file_list.extend(unit["body"])
for file_name in previous_file_list:
if file_name not in produced_file_list:
dust_file_path = os.path.normpath(os.path.join(test_dir, file_name))
Ui.laconic("clean dust file: " + file_name)
dust_file_fullpath = os.path.realpath(dust_file_path)
if not os.path.isfile(dust_file_fullpath):
# if you're there, it's because you are debugging a crash
continue
FileSystem.cleanRemoveFile(dust_file_fullpath)
paktrace_dir = Default.getPakTraceDir(test_dir)
if os.path.isdir(paktrace_dir):
logging.debug("look for dust in directory: " + paktrace_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
dir_name = os.path.relpath(dir_name, test_dir)
logging.debug("found paktrace dir: " + dir_name)
for file_name in file_name_list:
file_path = os.path.join(dir_name, file_name)
file_path = os.path.normpath(file_path)
relative_paktrace_dir = os.path.relpath(paktrace_dir, test_dir)
trace_file = os.path.relpath(file_path, relative_paktrace_dir)
head_name=trace_file[:-len(Default.paktrace_file_ext)]
if head_name not in head_list:
Ui.print("clean dust paktrace: " + file_path)
dust_paktrace_path = os.path.normpath(os.path.join(test_dir, file_path))
dust_paktrace_fullpath = os.path.realpath(dust_paktrace_path)
FileSystem.cleanRemoveFile(dust_paktrace_fullpath)
| aktrace | identifier_name |
Pak.py | #! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
from Urcheon import Action
from Urcheon import Default
from Urcheon import FileSystem
from Urcheon import Game
from Urcheon import MapCompiler
from Urcheon import Parallelism
from Urcheon import Repository
from Urcheon import Ui
import __main__ as m
import argparse
import logging
import os
import sys
import tempfile
import time
import zipfile
from collections import OrderedDict
from datetime import datetime
from operator import attrgetter
class MultiRunner():
def __init__(self, source_dir_list, args):
self.source_dir_list = source_dir_list
self.args = args
self.runner_dict = {
"prepare": Builder,
"build": Builder,
"package": Packager,
}
def run(self):
cpu_count = Parallelism.countCPU()
runner_thread_list = []
for source_dir in self.source_dir_list:
# FIXME: because of this code Urcheon must run within package set directory
Ui.notice(self.args.stage_name + " from: " + source_dir)
source_dir = os.path.realpath(source_dir)
source_tree = Repository.Tree(source_dir, game_name=self.args.game_name)
runner = self.runner_dict[self.args.stage_name](source_tree, self.args)
if self.args.no_parallel:
runner.run()
else:
runner_thread = Parallelism.Thread(target=runner.run)
runner_thread_list.append(runner_thread)
while len(runner_thread_list) > cpu_count:
# join dead thread early to raise thread exceptions early
# forget ended threads
runner_thread_list = Parallelism.joinDeadThreads(runner_thread_list)
runner_thread.start()
# wait for all remaining threads ending
Parallelism.joinThreads(runner_thread_list)
class Builder():
def __init__(self, source_tree, args, is_nested=False, disabled_action_list=[], file_list=[]):
self.source_tree = source_tree
self.source_dir = source_tree.dir
self.pak_name = source_tree.pak_name
self.pak_format = source_tree.pak_format
self.game_name = source_tree.game_name
self.is_nested = is_nested
self.stage_name = args.stage_name
if is_nested:
self.keep_dust = False
else:
self.keep_dust = args.keep_dust
action_list = Action.List(source_tree, self.stage_name, disabled_action_list=disabled_action_list)
if self.stage_name == "prepare":
self.test_dir = self.source_dir
self.since_reference = None
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
# FIXME: currently the prepare stage
# can't be parallel (for example SlothRun task
# needs all PrevRun tasks to be finished first)
# btw all packages can be prepared in parallel
self.is_parallel = False
else:
if is_nested:
self.test_dir = args.test_dir
else:
self.test_dir = source_tree.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
if is_nested:
self.since_reference = False
self.no_auto_actions = False
self.clean_map = False
self.map_profile = None
self.is_parallel = not args.no_parallel
else:
self.since_reference = args.since_reference
self.no_auto_actions = args.no_auto_actions
self.clean_map = args.clean_map
self.map_profile = args.map_profile
self.is_parallel = not args.no_parallel
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(self.source_tree, self.test_dir, self.stage_name)
self.deps = Repository.Deps(self.source_tree, self.test_dir)
if not is_nested:
if self.pak_format == "dpk":
deleted_action_list = self.deleted.getActions()
action_list.readActions(action_list=deleted_action_list)
action_list.readActions()
if not file_list:
# FIXME: only if one package?
# same reference for multiple packages
# makes sense when using tags
# NOTE: already prepared file can be seen as source again, but there may be no easy way to solve it
if self.since_reference:
file_repo = Repository.Git(self.source_dir, self.pak_format)
file_list = file_repo.listFilesSinceReference(self.since_reference)
# also look for untracked files
untracked_file_list = file_repo.listUntrackedFiles()
for file_name in untracked_file_list:
if file_name not in file_list:
logging.debug("found untracked file “" + file_name + "”")
# FIXME: next loop will look for prepared files for it, which makes no sense,
# is it harmful?
file_list.append(file_name)
# also look for files produced with “prepare” command
# from files modified since this reference
paktrace = Repository.Paktrace(source_tree, self.source_dir)
input_file_dict = paktrace.getFileDict()["input"]
for file_path in file_list:
logging.debug("looking for prepared files for “" + str(file_path) + "”")
logging.debug("looking for prepared files for “" + file_path + "”")
if file_path in input_file_dict.keys():
for input_file_path in input_file_dict[file_path]:
if not os.path.exists(os.path.join(self.source_dir, input_file_path)):
logging.debug("missing prepared files for “" + file_path + "”: " + input_file_path)
else:
logging.debug("found prepared files for “" + file_path + "”: " + input_file_path)
file_list.append(input_file_path)
else:
file_list = source_tree.listFiles()
if not self.no_auto_actions:
action_list.computeActions(file_list)
self.action_list = action_list
self.game_profile = Game.Game(source_tree)
if not self.map_profile:
map_config = MapCompiler.Config(source_tree)
self.map_profile = map_config.requireDefaultProfile()
def run(self):
if self.source_dir == self.test_dir:
Ui.print("Preparing: " + self.source_dir)
else:
Ui.print("Building “" + self.source_dir + "” as: " + self.test_dir)
# TODO: check if not a directory
if os.path.isdir(self.test_dir):
logging.debug("found build dir: " + self.test_dir)
else:
logging.debug("create build dir: " + self.test_dir)
os.makedirs(self.test_dir, exist_ok=True)
if not self.is_nested and not self.keep_dust:
clean_dust = True
else:
clean_dust = False
if clean_dust:
# do not read paktrace from temporary directories
# do not read paktrace if dust will be kept
paktrace = Repository.Paktrace(self.source_tree, self.test_dir)
previous_file_list = paktrace.listAll()
if self.clean_map or clean_dust:
cleaner = Cleaner(self.source_tree)
if self.clean_map:
cleaner.cleanMap(self.test_dir)
cpu_count = Parallelism.countCPU()
action_thread_list = []
produced_unit_list = []
main_process = Parallelism.getProcess()
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
# no need to use multiprocessing module to manage task contention, since each task will call its own process
# using threads on one core is faster, and it does not prevent tasks to be able to use other cores
# the is_nested argument is there to tell action to not do specific stuff because of recursion
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, map_profile=self.map_profile, is_nested=self.is_nested)
# check if task is already done (usually comparing timestamps the make way)
if action.isDone():
produced_unit_list.extend(action.getOldProducedUnitList())
continue
if not self.is_parallel or not action_type.is_parallel:
# tasks are run sequentially but they can
# use multiple threads themselves
thread_count = cpu_count
else:
# this compute is super slow because of process.children()
child_thread_count = Parallelism.countChildThread(main_process)
thread_count = max(1, cpu_count - child_thread_count)
action.thread_count = thread_count
if not self.is_parallel or not action_type.is_parallel:
# sequential build explicitely requested (like in recursion)
# or action that can't be run concurrently to others (like MergeBsp)
produced_unit_list.extend(action.run())
else:
# do not use >= in case of there is some extra thread we don't think about
# it's better to spawn an extra one than looping forever
while child_thread_count > cpu_count:
# no need to loop at full cpu speed
time.sleep(.05)
child_thread_count = Parallelism.countChildThread(main_process)
pass
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
action.thread_count = max(2, cpu_count - child_thread_count)
# wrapper does: produced_unit_list.extend(action.run())
action_thread = Parallelism.Thread(target=self.threadExtendRes, args=(action.run, (), produced_unit_list))
action_thread_list.append(action_thread)
action_thread.start()
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
# wait for all threads to end, otherwise it will start packaging next
# package while the building task for the current one is not ended
# and well, we now have to read that list to purge old files, so we
# must wait
Parallelism.joinThreads(action_thread_list)
# Handle symbolic links.
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, action_list=self.action_list, map_profile=self.map_profile, is_nested=self.is_nested)
# TODO: check for symbolic link to missing or deleted files.
produced_unit_list.extend(action.symlink())
# deduplication
unit_list = []
deleted_file_list = []
produced_file_list = []
for unit in produced_unit_list:
if unit == []:
continue
logging.debug("unit: " + str(unit))
head = unit["head"]
body = unit["body"]
action = unit["action"]
if action == "ignore":
continue
if action == "delete":
deleted_file_list.append( head )
if head not in produced_file_list:
produced_file_list.append(head)
for part in body:
if part not in produced_file_list:
# FIXME: only if action was not “ignore”
produced_file_list.append(part)
# if multiple calls produce the same files (like merge_bsp)
# FIXME: that can't work, this is probably a leftover
# or we may have to do “if head in body” instead.
# See https://github.com/DaemonEngine/Urcheon/issues/48
if head in unit:
continue
unit_list.append(unit)
produced_unit_list = unit_list
if self.stage_name == "build" and not self.is_nested:
if self.pak_format == "dpk":
is_deleted = False
if self.since_reference:
Ui.laconic("looking for deleted files")
# Unvanquished game did not support DELETED file until after 0.52.1.
workaround_no_delete = self.source_tree.game_name == "unvanquished" and self.since_reference in ["unvanquished/0.52.1", "v0.52.1"]
git_repo = Repository.Git(self.source_dir, "dpk", workaround_no_delete=workaround_no_delete)
previous_version = git_repo.computeVersion(self.since_reference, named_reference=True)
self.deps.set(self.pak_name, previous_version)
for deleted_file in git_repo.getDeletedFileList(self.since_reference):
if deleted_file not in deleted_file_list:
is_deleted = True
deleted_file_list.append(deleted_file)
if deleted_file_list:
is_deleted = True
for deleted_file in deleted_file_list:
self.deleted.set(self.pak_name, deleted_file)
if self.deleted.read():
is_deleted = True
if is_deleted:
deleted_part_list = self.deleted.translate()
# TODO: No need to mark as DELETED a file from the same
# package if it does not depend on itself.
# TODO: A way to not translate DELETED files may be needed
# in some cases.
# If flamer.jpg producing flamer.crn was replaced
# by flamer.png also producing flamer.crn, the
# flamer.crn file will be listed as deleted
# while it will be shipped, but built from another
# source file, so we must check deleted files
# aren't built in other way to avoid listing
# as deleted a file that is actually shipped.
for deleted_part_dict in deleted_part_list:
is_built = False
if deleted_part_dict["pak_name"] == self.pak_name:
deleted_part = deleted_part_dict["file_path"]
if deleted_part.startswith(Default.repository_config_dir + os.path.sep):
continue
if deleted_part.startswith(Default.legacy_pakinfo_dir + os.path.sep):
continue
if deleted_part in produced_file_list:
is_built = True
Ui.laconic(deleted_part + ": do nothing because it is produced by another source file.")
self.deleted.removePart(self.pak_name, deleted_part)
if not is_built:
Ui.laconic(deleted_part + ": will mark as deleted.")
# Writing DELETED file.
for deleted_part in deleted_part_list:
self.deleted.set(self.source_tree.pak_name, deleted_part)
is_deleted = self.deleted.write()
if is_deleted:
unit = {
"head": "DELETED",
"body": [ "DELETED" ],
}
produced_unit_list.append(unit)
else:
# Remove DELETED leftover from partial build.
self.deps.remove(self.test_dir)
is_deps = False
# add itself to DEPS if partial build,
# also look for deleted files
if self.since_reference:
is_deps = True
if self.deps.read():
is_deps = True
if is_deps:
# translating DEPS file
self.deps.translateTest()
self.deps.write()
unit = {
"head": "DEPS",
"body": [ "DEPS" ],
}
produced_unit_list.append(unit)
else:
# Remove DEPS leftover from partial build.
self.deps.remove(self.test_dir)
logging.debug("produced unit list:" + str(produced_unit_list))
# do not clean-up if building from temporary directories
# or if user asked to not clean-up
if clean_dust:
cleaner.cleanDust(self.test_dir, produced_unit_list, previous_file_list)
return produced_unit_list
def threadExtendRes(self, func, args, res):
# magic: only works if res is a mutable object (like a list)
res.extend(func(*args))
class Packager():
# TODO: reuse paktraces, do not walk for file,s
def __init__(self, source_tree, args):
self.source_dir = source_tree.dir
self.pak_vfs = source_tree.pak_vfs
self.pak_config = source_tree.pak_config
self.pak_format = source_tree.pak_format
self.allow_dirty = args.allow_dirty
self.no_compress = args.no_compress
self.test_dir = self.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
self.pak_file = self.pak_config.getPakFile(build_prefix=args.build_prefix, pak_prefix=args.pak_prefix, pak_file=args.pak_file, version_suffix=args.version_suffix)
self.game_profile = Game.Game(source_tree)
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(source_tree, self.test_dir, None)
self.deps = Repository.Deps(source_tree, self.test_dir)
def createSubdirs(self, pak_file):
pak_subdir = os.path.dirname(pak_fil | isdir(self.test_dir):
Ui.error("test pakdir not built: " + self.test_dir)
source_repository = Repository.Git(self.source_dir, self.pak_format)
if source_repository.isGit() and source_repository.isDirty():
if self.allow_dirty:
Ui.warning("Dirty repository: " + self.source_dir)
else:
Ui.error("Dirty repository isn't allowed to be packaged (use --allow-dirty to override): " + self.source_dir)
Ui.print("Packaging “" + self.test_dir + "” as: " + self.pak_file)
self.createSubdirs(self.pak_file)
logging.debug("opening: " + self.pak_file)
# remove existing file (do not write in place) to force the game engine to reread the file
if os.path.isfile(self.pak_file):
logging.debug("remove existing package: " + self.pak_file)
os.remove(self.pak_file)
if self.no_compress:
# why zlib.Z_NO_COMPRESSION not defined?
zipfile.zlib.Z_DEFAULT_COMPRESSION = 0
else:
# maximum compression
zipfile.zlib.Z_DEFAULT_COMPRESSION = zipfile.zlib.Z_BEST_COMPRESSION
found_file = False
paktrace_dir = Default.getPakTraceDir(self.test_dir)
relative_paktrace_dir = os.path.relpath(paktrace_dir, self.test_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
for file_name in file_name_list:
found_file = True
break
if found_file:
break
# FIXME: if only the DEPS file is modified, the package will
# not be created (it should be).
if not found_file:
Ui.print("Not writing empty package: " + self.pak_file)
return
pak = zipfile.ZipFile(self.pak_file, "w", zipfile.ZIP_DEFLATED)
for dir_name, subdir_name_list, file_name_list in os.walk(self.test_dir):
for file_name in file_name_list:
rel_dir_name = os.path.relpath(dir_name, self.test_dir)
full_path = os.path.join(dir_name, file_name)
file_path = os.path.relpath(full_path, self.test_dir)
# ignore paktrace files
if file_path.startswith(relative_paktrace_dir + os.path.sep):
continue
# ignore DELETED and DEPS file, will add it later
if self.pak_format == "dpk" and file_path in Repository.dpk_special_files:
continue
found_file = True
# TODO: add a mechanism to know if VFS supports
# symbolic links in packages or not.
# Dæmon's DPK VFS is supporting symbolic links.
# DarkPlaces' PK3 VFS is supporting symbolic links.
# Others may not.
is_symlink_supported = True
if is_symlink_supported and os.path.islink(full_path):
Ui.print("add symlink to package " + os.path.basename(self.pak_file) + ": " + file_path)
# TODO: Remove this test when Urcheon deletes extra
# files in build directory. Currently a deleted but not
# committed file is kept.
if os.path.exists(full_path):
# FIXME: getmtime reads realpath datetime, not symbolic link datetime.
file_date_time = (datetime.fromtimestamp(os.path.getmtime(full_path)))
# See https://stackoverflow.com/a/61795576/9131399
attrs = ('year', 'month', 'day', 'hour', 'minute', 'second')
file_date_time_tuple = attrgetter(*attrs)(file_date_time)
# See https://stackoverflow.com/a/60691331/9131399
zip_info = zipfile.ZipInfo(file_path, date_time=file_date_time_tuple)
zip_info.create_system = 3
file_permissions = 0o777
file_permissions |= 0xA000
zip_info.external_attr = file_permissions << 16
target_path = os.readlink(full_path)
pak.writestr(zip_info, target_path)
else:
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": " + file_path)
pak.write(full_path, arcname=file_path)
if self.pak_format == "dpk":
# Writing DELETED file.
deleted_file_path = self.deleted.get_test_path()
if os.path.isfile(deleted_file_path):
pak.write(deleted_file_path, arcname="DELETED")
# Translating DEPS file.
if self.deps.read(deps_dir=self.test_dir):
self.deps.translateRelease(self.pak_vfs)
deps_temp_dir = tempfile.mkdtemp()
deps_temp_file = self.deps.write(deps_dir=deps_temp_dir)
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": DEPS")
pak.write(deps_temp_file, arcname="DEPS")
logging.debug("close: " + self.pak_file)
pak.close()
if source_repository.isGit():
repo_date = int(source_repository.getDate("HEAD"))
os.utime(self.pak_file, (repo_date, repo_date))
Ui.laconic("Package written: " + self.pak_file)
class Cleaner():
def __init__(self, source_tree):
self.pak_name = source_tree.pak_name
self.game_profile = Game.Game(source_tree)
def cleanTest(self, test_dir):
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
that_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + that_file)
os.remove(that_file)
FileSystem.removeEmptyDir(dir_name)
for dir_name in subdir_name_list:
that_dir = dir_name + os.path.sep + dir_name
FileSystem.removeEmptyDir(that_dir)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanPak(self, pak_prefix):
for dir_name, subdir_name_list, file_name_list in os.walk(pak_prefix):
for file_name in file_name_list:
if file_name.startswith(self.pak_name) and file_name.endswith(self.game_profile.pak_ext):
pak_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + pak_file)
os.remove(pak_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(pak_prefix)
def cleanMap(self, test_dir):
# TODO: use paktrace abilities?
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "bsp"):
bsp_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + bsp_file)
os.remove(bsp_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "map"):
map_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + map_file)
os.remove(map_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-2:-1] == ["maps"] and file_name.startswith("lm_"):
lightmap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + lightmap_file)
os.remove(lightmap_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["maps"] and file_name.endswith(os.path.extsep + "navMesh"):
navmesh_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + navmesh_file)
os.remove(navmesh_file)
FileSystem.removeEmptyDir(dir_name)
if dir_name.split("/")[-1:] == ["minimaps"]:
minimap_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + minimap_file)
os.remove(minimap_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanDust(self, test_dir, produced_unit_list, previous_file_list):
# TODO: remove extra files that are not tracked in paktraces?
# FIXME: reuse produced_file_list from build()
produced_file_list = []
head_list = []
for unit in produced_unit_list:
head_list.append(unit["head"])
produced_file_list.extend(unit["body"])
for file_name in previous_file_list:
if file_name not in produced_file_list:
dust_file_path = os.path.normpath(os.path.join(test_dir, file_name))
Ui.laconic("clean dust file: " + file_name)
dust_file_fullpath = os.path.realpath(dust_file_path)
if not os.path.isfile(dust_file_fullpath):
# if you're there, it's because you are debugging a crash
continue
FileSystem.cleanRemoveFile(dust_file_fullpath)
paktrace_dir = Default.getPakTraceDir(test_dir)
if os.path.isdir(paktrace_dir):
logging.debug("look for dust in directory: " + paktrace_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
dir_name = os.path.relpath(dir_name, test_dir)
logging.debug("found paktrace dir: " + dir_name)
for file_name in file_name_list:
file_path = os.path.join(dir_name, file_name)
file_path = os.path.normpath(file_path)
relative_paktrace_dir = os.path.relpath(paktrace_dir, test_dir)
trace_file = os.path.relpath(file_path, relative_paktrace_dir)
head_name=trace_file[:-len(Default.paktrace_file_ext)]
if head_name not in head_list:
Ui.print("clean dust paktrace: " + file_path)
dust_paktrace_path = os.path.normpath(os.path.join(test_dir, file_path))
dust_paktrace_fullpath = os.path.realpath(dust_paktrace_path)
FileSystem.cleanRemoveFile(dust_paktrace_fullpath)
| e)
if pak_subdir == "":
pak_subdir = "."
if os.path.isdir(pak_subdir):
logging.debug("found pak subdir: " + pak_subdir)
else:
logging.debug("create pak subdir: " + pak_subdir)
os.makedirs(pak_subdir, exist_ok=True)
def run(self):
if not os.path. | identifier_body |
patchsBuild.py | __author__ = 'amir'
import sys
import os
import glob
import re
import subprocess
import shutil
import sqlite3
import utilsConf
# git format-patch --root origin
def mkDirs(outDir,commitID):
o=outDir+"\\"+commitID
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\before"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\after"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\parser"
if not (os.path.isdir(o)):
os.mkdir(o)
def oneFileParser(methods,javaFile,inds,key):
if not ".java" in javaFile:
return
f=open(javaFile)
lines=f.readlines()
f.close()
if len([l for l in lines if l.lstrip()!=""])==0:
return
run_commands = ["java", "-jar", "C:\projs\checkstyle-6.8-SNAPSHOT-all.jar ", "-c", "C:\projs\methodNameLines.xml",
javaFile]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, shell=True,cwd=r'C:\projs')
(out, err) = proc.communicate()
out=out.replace("\n","").split("\r")[1:-3]
fileName=javaFile.split("\\")[-1]
fileName=fileName.replace("_","\\")
for o in out:
if o=="":
continue
if not "@" in o:
continue
file,data=o.split(" ")
name,begin,end=data.split("@")
methodDir=fileName+"$"+name
if not methodDir in methods:
methods[methodDir]={}
if not "methodName" in methods[methodDir]:
methods[methodDir]["methodName"]=name
if not "fileName" in methods[methodDir]:
methods[methodDir]["fileName"]=fileName
rng=range(int(begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def OneClass(diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
|
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = ""
inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == 0:
continue
methodDir = fileName + "$" + name
tup = (methodDir, commitID)
if not tup in methods:
methods[tup] = {}
methods[tup][key] = keyChange
if not "methodName" in methods[tup]:
methods[tup]["methodName"] = name
if not "fileName" in methods[tup]:
methods[tup]["fileName"] = fileName
if not "commitID" in methods[tup]:
methods[tup]["commitID"] = commitID
return methods
def readChangesFile(change):
dict = {}
rows = []
with open(change, "r") as f:
for line in f:
fileName, commitSha, dels, Ins = line.strip().split("@")
fileName = fileName.replace("_", os.path.sep)
dict[(fileName, commitSha)] = [eval(dels), eval(Ins)]
rows.append(map(str, [fileName,commitSha, len(dels), len(Ins), len(dels)+len(Ins)]))
return dict, rows
def analyzeCheckStyle(checkOut, changeFile):
changesDict, filesRows = readChangesFile(changeFile)
methods = checkStyleCreateDict(checkOut, changesDict)
all_methods = []
for tup in methods:
methodDir = tup[0]
dels = methods[tup].setdefault("deletions", 0)
ins = methods[tup].setdefault("insertions", 0)
fileName = methods[tup].setdefault("fileName", "")
methodName = methods[tup].setdefault("methodName", "")
commitID = methods[tup].setdefault("commitID", "")
all_methods.append(map(str, [commitID, methodDir, fileName, methodName, dels, ins, dels+ins]))
return all_methods, filesRows
# @utilsConf.marker_decorator(utilsConf.PATCHS_FEATURES_MARKER)
def do_all():
patchD = os.path.join(utilsConf.get_configuration().LocalGitPath, "patch")
commitsFiles = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles")
changedFile = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles", "Ins_dels.txt")
mkdir(patchD)
mkdir(commitsFiles)
run_commands = "git format-patch --root -o patch --function-context --unified=9000".split()
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=utilsConf.to_short_path(utilsConf.get_configuration().LocalGitPath))
proc.communicate()
buildPatchs(patchD, commitsFiles, changedFile)
checkOut = os.path.join(commitsFiles, "CheckStyle.txt")
RunCheckStyle(commitsFiles, checkOut, utilsConf.get_configuration().checkStyle68, utilsConf.get_configuration().methodsNamesXML)
| afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1 | conditional_block |
patchsBuild.py | __author__ = 'amir'
import sys
import os
import glob
import re
import subprocess
import shutil
import sqlite3
import utilsConf
# git format-patch --root origin
def mkDirs(outDir,commitID):
o=outDir+"\\"+commitID
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\before"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\after"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\parser"
if not (os.path.isdir(o)):
os.mkdir(o)
def oneFileParser(methods,javaFile,inds,key):
if not ".java" in javaFile:
return
f=open(javaFile)
lines=f.readlines()
f.close()
if len([l for l in lines if l.lstrip()!=""])==0:
return
run_commands = ["java", "-jar", "C:\projs\checkstyle-6.8-SNAPSHOT-all.jar ", "-c", "C:\projs\methodNameLines.xml",
javaFile]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, shell=True,cwd=r'C:\projs')
(out, err) = proc.communicate()
out=out.replace("\n","").split("\r")[1:-3]
fileName=javaFile.split("\\")[-1]
fileName=fileName.replace("_","\\")
for o in out:
if o=="":
continue
if not "@" in o:
continue
file,data=o.split(" ")
name,begin,end=data.split("@")
methodDir=fileName+"$"+name
if not methodDir in methods:
methods[methodDir]={}
if not "methodName" in methods[methodDir]:
methods[methodDir]["methodName"]=name
if not "fileName" in methods[methodDir]:
methods[methodDir]["fileName"]=fileName
rng=range(int(begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def | (diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = ""
inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == 0:
continue
methodDir = fileName + "$" + name
tup = (methodDir, commitID)
if not tup in methods:
methods[tup] = {}
methods[tup][key] = keyChange
if not "methodName" in methods[tup]:
methods[tup]["methodName"] = name
if not "fileName" in methods[tup]:
methods[tup]["fileName"] = fileName
if not "commitID" in methods[tup]:
methods[tup]["commitID"] = commitID
return methods
def readChangesFile(change):
dict = {}
rows = []
with open(change, "r") as f:
for line in f:
fileName, commitSha, dels, Ins = line.strip().split("@")
fileName = fileName.replace("_", os.path.sep)
dict[(fileName, commitSha)] = [eval(dels), eval(Ins)]
rows.append(map(str, [fileName,commitSha, len(dels), len(Ins), len(dels)+len(Ins)]))
return dict, rows
def analyzeCheckStyle(checkOut, changeFile):
changesDict, filesRows = readChangesFile(changeFile)
methods = checkStyleCreateDict(checkOut, changesDict)
all_methods = []
for tup in methods:
methodDir = tup[0]
dels = methods[tup].setdefault("deletions", 0)
ins = methods[tup].setdefault("insertions", 0)
fileName = methods[tup].setdefault("fileName", "")
methodName = methods[tup].setdefault("methodName", "")
commitID = methods[tup].setdefault("commitID", "")
all_methods.append(map(str, [commitID, methodDir, fileName, methodName, dels, ins, dels+ins]))
return all_methods, filesRows
# @utilsConf.marker_decorator(utilsConf.PATCHS_FEATURES_MARKER)
def do_all():
patchD = os.path.join(utilsConf.get_configuration().LocalGitPath, "patch")
commitsFiles = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles")
changedFile = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles", "Ins_dels.txt")
mkdir(patchD)
mkdir(commitsFiles)
run_commands = "git format-patch --root -o patch --function-context --unified=9000".split()
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=utilsConf.to_short_path(utilsConf.get_configuration().LocalGitPath))
proc.communicate()
buildPatchs(patchD, commitsFiles, changedFile)
checkOut = os.path.join(commitsFiles, "CheckStyle.txt")
RunCheckStyle(commitsFiles, checkOut, utilsConf.get_configuration().checkStyle68, utilsConf.get_configuration().methodsNamesXML)
| OneClass | identifier_name |
patchsBuild.py | __author__ = 'amir'
import sys
import os
import glob
import re
import subprocess
import shutil
import sqlite3
import utilsConf
# git format-patch --root origin
def mkDirs(outDir,commitID):
o=outDir+"\\"+commitID
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\before"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\after"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\parser"
if not (os.path.isdir(o)):
os.mkdir(o)
def oneFileParser(methods,javaFile,inds,key):
if not ".java" in javaFile:
return
f=open(javaFile)
lines=f.readlines()
f.close()
if len([l for l in lines if l.lstrip()!=""])==0:
return
run_commands = ["java", "-jar", "C:\projs\checkstyle-6.8-SNAPSHOT-all.jar ", "-c", "C:\projs\methodNameLines.xml",
javaFile]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, shell=True,cwd=r'C:\projs')
(out, err) = proc.communicate()
out=out.replace("\n","").split("\r")[1:-3]
fileName=javaFile.split("\\")[-1]
fileName=fileName.replace("_","\\")
for o in out:
if o=="":
continue
if not "@" in o:
continue
file,data=o.split(" ")
name,begin,end=data.split("@")
methodDir=fileName+"$"+name
if not methodDir in methods:
methods[methodDir]={}
if not "methodName" in methods[methodDir]:
methods[methodDir]["methodName"]=name
if not "fileName" in methods[methodDir]:
methods[methodDir]["fileName"]=fileName
rng=range(int(begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def OneClass(diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = ""
inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == 0:
continue
methodDir = fileName + "$" + name
tup = (methodDir, commitID)
if not tup in methods:
methods[tup] = {}
methods[tup][key] = keyChange
if not "methodName" in methods[tup]:
methods[tup]["methodName"] = name
if not "fileName" in methods[tup]:
methods[tup]["fileName"] = fileName
if not "commitID" in methods[tup]:
methods[tup]["commitID"] = commitID
return methods
def readChangesFile(change):
dict = {}
rows = []
with open(change, "r") as f:
for line in f:
fileName, commitSha, dels, Ins = line.strip().split("@") | dict[(fileName, commitSha)] = [eval(dels), eval(Ins)]
rows.append(map(str, [fileName,commitSha, len(dels), len(Ins), len(dels)+len(Ins)]))
return dict, rows
def analyzeCheckStyle(checkOut, changeFile):
changesDict, filesRows = readChangesFile(changeFile)
methods = checkStyleCreateDict(checkOut, changesDict)
all_methods = []
for tup in methods:
methodDir = tup[0]
dels = methods[tup].setdefault("deletions", 0)
ins = methods[tup].setdefault("insertions", 0)
fileName = methods[tup].setdefault("fileName", "")
methodName = methods[tup].setdefault("methodName", "")
commitID = methods[tup].setdefault("commitID", "")
all_methods.append(map(str, [commitID, methodDir, fileName, methodName, dels, ins, dels+ins]))
return all_methods, filesRows
# @utilsConf.marker_decorator(utilsConf.PATCHS_FEATURES_MARKER)
def do_all():
patchD = os.path.join(utilsConf.get_configuration().LocalGitPath, "patch")
commitsFiles = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles")
changedFile = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles", "Ins_dels.txt")
mkdir(patchD)
mkdir(commitsFiles)
run_commands = "git format-patch --root -o patch --function-context --unified=9000".split()
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=utilsConf.to_short_path(utilsConf.get_configuration().LocalGitPath))
proc.communicate()
buildPatchs(patchD, commitsFiles, changedFile)
checkOut = os.path.join(commitsFiles, "CheckStyle.txt")
RunCheckStyle(commitsFiles, checkOut, utilsConf.get_configuration().checkStyle68, utilsConf.get_configuration().methodsNamesXML) | fileName = fileName.replace("_", os.path.sep) | random_line_split |
patchsBuild.py | __author__ = 'amir'
import sys
import os
import glob
import re
import subprocess
import shutil
import sqlite3
import utilsConf
# git format-patch --root origin
def mkDirs(outDir,commitID):
o=outDir+"\\"+commitID
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\before"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\after"
if not (os.path.isdir(o)):
os.mkdir(o)
o=outDir+"\\"+commitID+"\\parser"
if not (os.path.isdir(o)):
os.mkdir(o)
def oneFileParser(methods,javaFile,inds,key):
if not ".java" in javaFile:
return
f=open(javaFile)
lines=f.readlines()
f.close()
if len([l for l in lines if l.lstrip()!=""])==0:
return
run_commands = ["java", "-jar", "C:\projs\checkstyle-6.8-SNAPSHOT-all.jar ", "-c", "C:\projs\methodNameLines.xml",
javaFile]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, shell=True,cwd=r'C:\projs')
(out, err) = proc.communicate()
out=out.replace("\n","").split("\r")[1:-3]
fileName=javaFile.split("\\")[-1]
fileName=fileName.replace("_","\\")
for o in out:
if o=="":
continue
if not "@" in o:
continue
file,data=o.split(" ")
name,begin,end=data.split("@")
methodDir=fileName+"$"+name
if not methodDir in methods:
methods[methodDir]={}
if not "methodName" in methods[methodDir]:
methods[methodDir]["methodName"]=name
if not "fileName" in methods[methodDir]:
methods[methodDir]["fileName"]=fileName
rng=range(int(begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def OneClass(diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
|
def readChangesFile(change):
dict = {}
rows = []
with open(change, "r") as f:
for line in f:
fileName, commitSha, dels, Ins = line.strip().split("@")
fileName = fileName.replace("_", os.path.sep)
dict[(fileName, commitSha)] = [eval(dels), eval(Ins)]
rows.append(map(str, [fileName,commitSha, len(dels), len(Ins), len(dels)+len(Ins)]))
return dict, rows
def analyzeCheckStyle(checkOut, changeFile):
changesDict, filesRows = readChangesFile(changeFile)
methods = checkStyleCreateDict(checkOut, changesDict)
all_methods = []
for tup in methods:
methodDir = tup[0]
dels = methods[tup].setdefault("deletions", 0)
ins = methods[tup].setdefault("insertions", 0)
fileName = methods[tup].setdefault("fileName", "")
methodName = methods[tup].setdefault("methodName", "")
commitID = methods[tup].setdefault("commitID", "")
all_methods.append(map(str, [commitID, methodDir, fileName, methodName, dels, ins, dels+ins]))
return all_methods, filesRows
# @utilsConf.marker_decorator(utilsConf.PATCHS_FEATURES_MARKER)
def do_all():
patchD = os.path.join(utilsConf.get_configuration().LocalGitPath, "patch")
commitsFiles = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles")
changedFile = os.path.join(utilsConf.get_configuration().LocalGitPath, "commitsFiles", "Ins_dels.txt")
mkdir(patchD)
mkdir(commitsFiles)
run_commands = "git format-patch --root -o patch --function-context --unified=9000".split()
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=utilsConf.to_short_path(utilsConf.get_configuration().LocalGitPath))
proc.communicate()
buildPatchs(patchD, commitsFiles, changedFile)
checkOut = os.path.join(commitsFiles, "CheckStyle.txt")
RunCheckStyle(commitsFiles, checkOut, utilsConf.get_configuration().checkStyle68, utilsConf.get_configuration().methodsNamesXML)
| methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = ""
inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == 0:
continue
methodDir = fileName + "$" + name
tup = (methodDir, commitID)
if not tup in methods:
methods[tup] = {}
methods[tup][key] = keyChange
if not "methodName" in methods[tup]:
methods[tup]["methodName"] = name
if not "fileName" in methods[tup]:
methods[tup]["fileName"] = fileName
if not "commitID" in methods[tup]:
methods[tup]["commitID"] = commitID
return methods | identifier_body |
sg_start.go | package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/cockroachdb/errors"
"github.com/peterbourgon/ff/v3/ffcli"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/stdout"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/lib/output"
)
var (
startFlagSet = flag.NewFlagSet("sg start", flag.ExitOnError)
debugStartServices = startFlagSet.String("debug", "", "Comma separated list of services to set at debug log level.")
infoStartServices = startFlagSet.String("info", "", "Comma separated list of services to set at info log level.")
warnStartServices = startFlagSet.String("warn", "", "Comma separated list of services to set at warn log level.")
errorStartServices = startFlagSet.String("error", "", "Comma separated list of services to set at error log level.")
critStartServices = startFlagSet.String("crit", "", "Comma separated list of services to set at crit log level.")
startCommand = &ffcli.Command{
Name: "start",
ShortUsage: "sg start [commandset]",
ShortHelp: "🌟Starts the given commandset. Without a commandset it starts the default Sourcegraph dev environment.",
LongHelp: constructStartCmdLongHelp(),
FlagSet: startFlagSet,
Exec: startExec,
}
// run-set is the deprecated older version of `start`
runSetFlagSet = flag.NewFlagSet("sg run-set", flag.ExitOnError)
runSetCommand = &ffcli.Command{
Name: "run-set",
ShortUsage: "sg run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func constructStartCmdLongHelp() string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names)
fmt.Fprint(&out, strings.Join(names, "\n"))
}
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " L_ ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / _)! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / /__L "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____/ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\_(____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\__/ "))
return startExec(ctx, args)
}
func pathExists(path string) (bool, error) {
_, err | := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
| identifier_body | |
sg_start.go | package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/cockroachdb/errors"
"github.com/peterbourgon/ff/v3/ffcli"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/stdout"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/lib/output"
)
var (
startFlagSet = flag.NewFlagSet("sg start", flag.ExitOnError)
debugStartServices = startFlagSet.String("debug", "", "Comma separated list of services to set at debug log level.")
infoStartServices = startFlagSet.String("info", "", "Comma separated list of services to set at info log level.")
warnStartServices = startFlagSet.String("warn", "", "Comma separated list of services to set at warn log level.")
errorStartServices = startFlagSet.String("error", "", "Comma separated list of services to set at error log level.")
critStartServices = startFlagSet.String("crit", "", "Comma separated list of services to set at crit log level.")
startCommand = &ffcli.Command{
Name: "start",
ShortUsage: "sg start [commandset]",
ShortHelp: "🌟Starts the given commandset. Without a commandset it starts the default Sourcegraph dev environment.",
LongHelp: constructStartCmdLongHelp(),
FlagSet: startFlagSet,
Exec: startExec,
}
// run-set is the deprecated older version of `start`
runSetFlagSet = flag.NewFlagSet("sg run-set", flag.ExitOnError)
runSetCommand = &ffcli.Command{
Name: "run-set",
ShortUsage: "sg run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func con | string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names)
fmt.Fprint(&out, strings.Join(names, "\n"))
}
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " L_ ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / _)! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / /__L "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____/ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\_(____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\__/ "))
return startExec(ctx, args)
}
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
| structStartCmdLongHelp() | identifier_name |
sg_start.go | package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/cockroachdb/errors"
"github.com/peterbourgon/ff/v3/ffcli"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/stdout"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/lib/output"
)
var (
startFlagSet = flag.NewFlagSet("sg start", flag.ExitOnError)
debugStartServices = startFlagSet.String("debug", "", "Comma separated list of services to set at debug log level.")
infoStartServices = startFlagSet.String("info", "", "Comma separated list of services to set at info log level.")
warnStartServices = startFlagSet.String("warn", "", "Comma separated list of services to set at warn log level.")
errorStartServices = startFlagSet.String("error", "", "Comma separated list of services to set at error log level.")
critStartServices = startFlagSet.String("crit", "", "Comma separated list of services to set at crit log level.")
startCommand = &ffcli.Command{
Name: "start",
ShortUsage: "sg start [commandset]",
ShortHelp: "🌟Starts the given commandset. Without a commandset it starts the default Sourcegraph dev environment.",
LongHelp: constructStartCmdLongHelp(),
FlagSet: startFlagSet,
Exec: startExec,
}
// run-set is the deprecated older version of `start`
runSetFlagSet = flag.NewFlagSet("sg run-set", flag.ExitOnError)
runSetCommand = &ffcli.Command{
Name: "run-set",
ShortUsage: "sg run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func constructStartCmdLongHelp() string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names)
fmt.Fprint(&out, strings.Join(names, "\n"))
}
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out. | checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " L_ ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / _)! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / /__L "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____/ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\_(____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\__/ "))
return startExec(ctx, args)
}
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
| WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var | conditional_block |
sg_start.go | package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/cockroachdb/errors"
"github.com/peterbourgon/ff/v3/ffcli"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/stdout"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/lib/output"
)
var (
startFlagSet = flag.NewFlagSet("sg start", flag.ExitOnError)
debugStartServices = startFlagSet.String("debug", "", "Comma separated list of services to set at debug log level.")
infoStartServices = startFlagSet.String("info", "", "Comma separated list of services to set at info log level.")
warnStartServices = startFlagSet.String("warn", "", "Comma separated list of services to set at warn log level.")
errorStartServices = startFlagSet.String("error", "", "Comma separated list of services to set at error log level.")
critStartServices = startFlagSet.String("crit", "", "Comma separated list of services to set at crit log level.")
startCommand = &ffcli.Command{
Name: "start",
ShortUsage: "sg start [commandset]",
ShortHelp: "🌟Starts the given commandset. Without a commandset it starts the default Sourcegraph dev environment.",
LongHelp: constructStartCmdLongHelp(),
FlagSet: startFlagSet,
Exec: startExec,
}
// run-set is the deprecated older version of `start`
runSetFlagSet = flag.NewFlagSet("sg run-set", flag.ExitOnError)
runSetCommand = &ffcli.Command{
Name: "run-set",
ShortUsage: "sg run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func constructStartCmdLongHelp() string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names) | }
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " L_ ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / _)! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / /__L "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____/ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\_(____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\__/ "))
return startExec(ctx, args)
}
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
} | fmt.Fprint(&out, strings.Join(names, "\n")) | random_line_split |
hunk.rs | use std::cmp::min;
use lazy_static::lazy_static;
use crate::cli;
use crate::config::{delta_unreachable, Config};
use crate::delta::{DiffType, InMergeConflict, MergeParents, State, StateMachine};
use crate::paint::{prepare, prepare_raw_line};
use crate::style;
use crate::utils::process::{self, CallingProcess};
use crate::utils::tabs;
// HACK: WordDiff should probably be a distinct top-level line state
pub fn is_word_diff() -> bool {
#[cfg(not(test))]
{
*CACHED_IS_WORD_DIFF
}
#[cfg(test)]
{
compute_is_word_diff()
}
}
lazy_static! {
static ref CACHED_IS_WORD_DIFF: bool = compute_is_word_diff();
}
fn compute_is_word_diff() -> bool {
match &*process::calling_process() {
CallingProcess::GitDiff(cmd_line)
| CallingProcess::GitShow(cmd_line, _)
| CallingProcess::GitLog(cmd_line)
| CallingProcess::GitReflog(cmd_line) => {
cmd_line.long_options.contains("--word-diff")
|| cmd_line.long_options.contains("--word-diff-regex")
|| cmd_line.long_options.contains("--color-words")
}
_ => false,
}
}
impl<'a> StateMachine<'a> {
#[inline]
fn test_hunk_line(&self) -> bool {
matches!(
self.state,
State::HunkHeader(_, _, _, _)
| State::HunkZero(_, _)
| State::HunkMinus(_, _)
| State::HunkPlus(_, _)
)
}
/// Handle a hunk line, i.e. a minus line, a plus line, or an unchanged line.
// In the case of a minus or plus line, we store the line in a
// buffer. When we exit the changed region we process the collected
// minus and plus lines jointly, in order to paint detailed
// highlighting according to inferred edit operations. In the case of
// an unchanged line, we paint it immediately.
pub fn handle_hunk_line(&mut self) -> std::io::Result<bool> |
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
}
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line(),
)),
(Some('+'), Some(prefix), Some(in_merge_conflict)) => Some(HunkPlus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_plus_raw_line(),
)),
_ => None,
}
}
#[cfg(test)]
mod tests {
use crate::tests::integration_test_utils::DeltaTest;
mod word_diff {
use super::*;
#[test]
fn test_word_diff() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --word-diff")
.explain_ansi()
.with_input(GIT_DIFF_WORD_DIFF)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)[-aaa-](green){+bbb+}(normal)
",
);
}
#[test]
fn test_color_words() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)aaa(green)bbb(normal)
",
);
}
#[test]
#[ignore] // FIXME
fn test_color_words_map_styles() {
DeltaTest::with_args(&[
"--map-styles",
"red => bold yellow #dddddd, green => bold blue #dddddd",
])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.inspect()
.expect_after_skip(
11,
r##"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(bold yellow "#dddddd")aaa(bold blue "#dddddd")bbb(normal)
"##,
);
}
#[test]
fn test_hunk_line_style_raw() {
DeltaTest::with_args(&["--minus-style", "raw", "--plus-style", "raw"])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(red)aaa(normal)
(green)bbb(normal)
",
);
}
#[test]
fn test_hunk_line_style_raw_map_styles() {
DeltaTest::with_args(&[
"--minus-style",
"raw",
"--plus-style",
"raw",
"--map-styles",
"red => bold blue, green => dim yellow",
])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(bold blue)aaa(normal)
(dim yellow)bbb(normal)
",
);
}
const GIT_DIFF_WITH_COLOR: &str = r#"\
[33mcommit 3ef7fba7258fe473f1d8befff367bb793c786107[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Mon Dec 13 22:54:43 2021 -0500
753 Test file
[1mdiff --git a/file b/file[m
[1mindex 72943a1..f761ec1 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m-aaa[m
[32m+[m[32mbbb[m
"#;
const GIT_DIFF_COLOR_WORDS: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31maaa[m[32mbbb[m
"#;
const GIT_DIFF_WORD_DIFF: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m[-aaa-][m[32m{+bbb+}[m
"#;
}
}
| {
use DiffType::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size
|| self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
}
if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
}
self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
} | identifier_body |
hunk.rs | use std::cmp::min;
use lazy_static::lazy_static;
use crate::cli;
use crate::config::{delta_unreachable, Config};
use crate::delta::{DiffType, InMergeConflict, MergeParents, State, StateMachine};
use crate::paint::{prepare, prepare_raw_line};
use crate::style;
use crate::utils::process::{self, CallingProcess};
use crate::utils::tabs;
// HACK: WordDiff should probably be a distinct top-level line state
pub fn is_word_diff() -> bool {
#[cfg(not(test))]
{
*CACHED_IS_WORD_DIFF
}
#[cfg(test)]
{
compute_is_word_diff()
}
}
lazy_static! {
static ref CACHED_IS_WORD_DIFF: bool = compute_is_word_diff();
}
fn compute_is_word_diff() -> bool {
match &*process::calling_process() {
CallingProcess::GitDiff(cmd_line)
| CallingProcess::GitShow(cmd_line, _)
| CallingProcess::GitLog(cmd_line)
| CallingProcess::GitReflog(cmd_line) => {
cmd_line.long_options.contains("--word-diff")
|| cmd_line.long_options.contains("--word-diff-regex")
|| cmd_line.long_options.contains("--color-words")
}
_ => false,
}
}
impl<'a> StateMachine<'a> {
#[inline]
fn | (&self) -> bool {
matches!(
self.state,
State::HunkHeader(_, _, _, _)
| State::HunkZero(_, _)
| State::HunkMinus(_, _)
| State::HunkPlus(_, _)
)
}
/// Handle a hunk line, i.e. a minus line, a plus line, or an unchanged line.
// In the case of a minus or plus line, we store the line in a
// buffer. When we exit the changed region we process the collected
// minus and plus lines jointly, in order to paint detailed
// highlighting according to inferred edit operations. In the case of
// an unchanged line, we paint it immediately.
pub fn handle_hunk_line(&mut self) -> std::io::Result<bool> {
use DiffType::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size
|| self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
}
if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
}
self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
}
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
}
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line(),
)),
(Some('+'), Some(prefix), Some(in_merge_conflict)) => Some(HunkPlus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_plus_raw_line(),
)),
_ => None,
}
}
#[cfg(test)]
mod tests {
use crate::tests::integration_test_utils::DeltaTest;
mod word_diff {
use super::*;
#[test]
fn test_word_diff() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --word-diff")
.explain_ansi()
.with_input(GIT_DIFF_WORD_DIFF)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)[-aaa-](green){+bbb+}(normal)
",
);
}
#[test]
fn test_color_words() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)aaa(green)bbb(normal)
",
);
}
#[test]
#[ignore] // FIXME
fn test_color_words_map_styles() {
DeltaTest::with_args(&[
"--map-styles",
"red => bold yellow #dddddd, green => bold blue #dddddd",
])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.inspect()
.expect_after_skip(
11,
r##"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(bold yellow "#dddddd")aaa(bold blue "#dddddd")bbb(normal)
"##,
);
}
#[test]
fn test_hunk_line_style_raw() {
DeltaTest::with_args(&["--minus-style", "raw", "--plus-style", "raw"])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(red)aaa(normal)
(green)bbb(normal)
",
);
}
#[test]
fn test_hunk_line_style_raw_map_styles() {
DeltaTest::with_args(&[
"--minus-style",
"raw",
"--plus-style",
"raw",
"--map-styles",
"red => bold blue, green => dim yellow",
])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(bold blue)aaa(normal)
(dim yellow)bbb(normal)
",
);
}
const GIT_DIFF_WITH_COLOR: &str = r#"\
[33mcommit 3ef7fba7258fe473f1d8befff367bb793c786107[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Mon Dec 13 22:54:43 2021 -0500
753 Test file
[1mdiff --git a/file b/file[m
[1mindex 72943a1..f761ec1 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m-aaa[m
[32m+[m[32mbbb[m
"#;
const GIT_DIFF_COLOR_WORDS: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31maaa[m[32mbbb[m
"#;
const GIT_DIFF_WORD_DIFF: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m[-aaa-][m[32m{+bbb+}[m
"#;
}
}
| test_hunk_line | identifier_name |
hunk.rs | use std::cmp::min;
use lazy_static::lazy_static;
use crate::cli;
use crate::config::{delta_unreachable, Config};
use crate::delta::{DiffType, InMergeConflict, MergeParents, State, StateMachine};
use crate::paint::{prepare, prepare_raw_line};
use crate::style;
use crate::utils::process::{self, CallingProcess};
use crate::utils::tabs;
// HACK: WordDiff should probably be a distinct top-level line state
pub fn is_word_diff() -> bool {
#[cfg(not(test))]
{
*CACHED_IS_WORD_DIFF
}
#[cfg(test)]
{
compute_is_word_diff()
}
}
lazy_static! {
static ref CACHED_IS_WORD_DIFF: bool = compute_is_word_diff();
}
fn compute_is_word_diff() -> bool {
match &*process::calling_process() {
CallingProcess::GitDiff(cmd_line)
| CallingProcess::GitShow(cmd_line, _)
| CallingProcess::GitLog(cmd_line)
| CallingProcess::GitReflog(cmd_line) => {
cmd_line.long_options.contains("--word-diff")
|| cmd_line.long_options.contains("--word-diff-regex")
|| cmd_line.long_options.contains("--color-words")
}
_ => false,
}
}
impl<'a> StateMachine<'a> {
#[inline]
fn test_hunk_line(&self) -> bool {
matches!(
self.state,
State::HunkHeader(_, _, _, _)
| State::HunkZero(_, _)
| State::HunkMinus(_, _)
| State::HunkPlus(_, _)
)
}
/// Handle a hunk line, i.e. a minus line, a plus line, or an unchanged line.
// In the case of a minus or plus line, we store the line in a
// buffer. When we exit the changed region we process the collected
// minus and plus lines jointly, in order to paint detailed
// highlighting according to inferred edit operations. In the case of
// an unchanged line, we paint it immediately.
pub fn handle_hunk_line(&mut self) -> std::io::Result<bool> {
use DiffType::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size | if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
}
self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
}
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
}
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line(),
)),
(Some('+'), Some(prefix), Some(in_merge_conflict)) => Some(HunkPlus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_plus_raw_line(),
)),
_ => None,
}
}
#[cfg(test)]
mod tests {
use crate::tests::integration_test_utils::DeltaTest;
mod word_diff {
use super::*;
#[test]
fn test_word_diff() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --word-diff")
.explain_ansi()
.with_input(GIT_DIFF_WORD_DIFF)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)[-aaa-](green){+bbb+}(normal)
",
);
}
#[test]
fn test_color_words() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)aaa(green)bbb(normal)
",
);
}
#[test]
#[ignore] // FIXME
fn test_color_words_map_styles() {
DeltaTest::with_args(&[
"--map-styles",
"red => bold yellow #dddddd, green => bold blue #dddddd",
])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.inspect()
.expect_after_skip(
11,
r##"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(bold yellow "#dddddd")aaa(bold blue "#dddddd")bbb(normal)
"##,
);
}
#[test]
fn test_hunk_line_style_raw() {
DeltaTest::with_args(&["--minus-style", "raw", "--plus-style", "raw"])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(red)aaa(normal)
(green)bbb(normal)
",
);
}
#[test]
fn test_hunk_line_style_raw_map_styles() {
DeltaTest::with_args(&[
"--minus-style",
"raw",
"--plus-style",
"raw",
"--map-styles",
"red => bold blue, green => dim yellow",
])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(bold blue)aaa(normal)
(dim yellow)bbb(normal)
",
);
}
const GIT_DIFF_WITH_COLOR: &str = r#"\
[33mcommit 3ef7fba7258fe473f1d8befff367bb793c786107[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Mon Dec 13 22:54:43 2021 -0500
753 Test file
[1mdiff --git a/file b/file[m
[1mindex 72943a1..f761ec1 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m-aaa[m
[32m+[m[32mbbb[m
"#;
const GIT_DIFF_COLOR_WORDS: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31maaa[m[32mbbb[m
"#;
const GIT_DIFF_WORD_DIFF: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m[-aaa-][m[32m{+bbb+}[m
"#;
}
} | || self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
} | random_line_split |
hunk.rs | use std::cmp::min;
use lazy_static::lazy_static;
use crate::cli;
use crate::config::{delta_unreachable, Config};
use crate::delta::{DiffType, InMergeConflict, MergeParents, State, StateMachine};
use crate::paint::{prepare, prepare_raw_line};
use crate::style;
use crate::utils::process::{self, CallingProcess};
use crate::utils::tabs;
// HACK: WordDiff should probably be a distinct top-level line state
pub fn is_word_diff() -> bool {
#[cfg(not(test))]
{
*CACHED_IS_WORD_DIFF
}
#[cfg(test)]
{
compute_is_word_diff()
}
}
lazy_static! {
static ref CACHED_IS_WORD_DIFF: bool = compute_is_word_diff();
}
fn compute_is_word_diff() -> bool {
match &*process::calling_process() {
CallingProcess::GitDiff(cmd_line)
| CallingProcess::GitShow(cmd_line, _)
| CallingProcess::GitLog(cmd_line)
| CallingProcess::GitReflog(cmd_line) => {
cmd_line.long_options.contains("--word-diff")
|| cmd_line.long_options.contains("--word-diff-regex")
|| cmd_line.long_options.contains("--color-words")
}
_ => false,
}
}
impl<'a> StateMachine<'a> {
#[inline]
fn test_hunk_line(&self) -> bool {
matches!(
self.state,
State::HunkHeader(_, _, _, _)
| State::HunkZero(_, _)
| State::HunkMinus(_, _)
| State::HunkPlus(_, _)
)
}
/// Handle a hunk line, i.e. a minus line, a plus line, or an unchanged line.
// In the case of a minus or plus line, we store the line in a
// buffer. When we exit the changed region we process the collected
// minus and plus lines jointly, in order to paint detailed
// highlighting according to inferred edit operations. In the case of
// an unchanged line, we paint it immediately.
pub fn handle_hunk_line(&mut self) -> std::io::Result<bool> {
use DiffType::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size
|| self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
}
if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
}
self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
}
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() |
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line(),
)),
(Some('+'), Some(prefix), Some(in_merge_conflict)) => Some(HunkPlus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_plus_raw_line(),
)),
_ => None,
}
}
#[cfg(test)]
mod tests {
use crate::tests::integration_test_utils::DeltaTest;
mod word_diff {
use super::*;
#[test]
fn test_word_diff() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --word-diff")
.explain_ansi()
.with_input(GIT_DIFF_WORD_DIFF)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)[-aaa-](green){+bbb+}(normal)
",
);
}
#[test]
fn test_color_words() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(red)aaa(green)bbb(normal)
",
);
}
#[test]
#[ignore] // FIXME
fn test_color_words_map_styles() {
DeltaTest::with_args(&[
"--map-styles",
"red => bold yellow #dddddd, green => bold blue #dddddd",
])
.with_calling_process("git diff --color-words")
.explain_ansi()
.with_input(GIT_DIFF_COLOR_WORDS)
.inspect()
.expect_after_skip(
11,
r##"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
(bold yellow "#dddddd")aaa(bold blue "#dddddd")bbb(normal)
"##,
);
}
#[test]
fn test_hunk_line_style_raw() {
DeltaTest::with_args(&["--minus-style", "raw", "--plus-style", "raw"])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(red)aaa(normal)
(green)bbb(normal)
",
);
}
#[test]
fn test_hunk_line_style_raw_map_styles() {
DeltaTest::with_args(&[
"--minus-style",
"raw",
"--plus-style",
"raw",
"--map-styles",
"red => bold blue, green => dim yellow",
])
.explain_ansi()
.with_input(GIT_DIFF_WITH_COLOR)
.expect_after_skip(
14,
"
(bold blue)aaa(normal)
(dim yellow)bbb(normal)
",
);
}
const GIT_DIFF_WITH_COLOR: &str = r#"\
[33mcommit 3ef7fba7258fe473f1d8befff367bb793c786107[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Mon Dec 13 22:54:43 2021 -0500
753 Test file
[1mdiff --git a/file b/file[m
[1mindex 72943a1..f761ec1 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m-aaa[m
[32m+[m[32mbbb[m
"#;
const GIT_DIFF_COLOR_WORDS: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31maaa[m[32mbbb[m
"#;
const GIT_DIFF_WORD_DIFF: &str = r#"\
[33mcommit 6feea4949c20583aaf16eee84f38d34d6a7f1741[m
Author: Dan Davison <dandavison7@gmail.com>
Date: Sat Dec 11 17:08:56 2021 -0500
file v2
[1mdiff --git a/file b/file[m
[1mindex c005da6..962086f 100644[m
[1m--- a/file[m
[1m+++ b/file[m
[31m@@ -1 +1 @@[m
[31m[-aaa-][m[32m{+bbb+}[m
"#;
}
}
| {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
} | conditional_block |
ps_couture.py | #!/usr/bin/env python
'''
Copyright (C) 2006 Jean-Francois Barraud, barraud@math.univ-lille1.fr
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
barraud@math.univ-lille1.fr
Quick description:
This script deforms an object (the pattern) along other paths (skeletons)...
The first selected object is the pattern
the last selected ones are the skeletons.
Imagine a straight horizontal line L in the middle of the bounding box of the pattern.
Consider the normal bundle of L: the collection of all the vertical lines meeting L.
Consider this as the initial state of the plane; in particular, think of the pattern
as painted on these lines.
Now move and bend L to make it fit a skeleton, and see what happens to the normals:
they move and rotate, deforming the pattern.
Mod pour creation de point couture par Vantieghem David 2018-2019
'''
# standard library
import copy
import math
import re
import random
import simplestyle
# local library
import inkex
import cubicsuperpath
import bezmisc
import pathmodifier
import simpletransform
inkex.localize()
def offset(pathcomp,dx,dy):
for ctl in pathcomp:
for pt in ctl:
pt[0]+=dx
pt[1]+=dy
def linearize(p,tolerance=0.001):
'''
This function recieves a component of a 'cubicsuperpath' and returns two things:
The path subdivided in many straight segments, and an array containing the length of each segment.
We could work with bezier path as well, but bezier arc lengths are (re)computed for each point
in the deformed object. For complex paths, this might take a while.
'''
zero=0.000001
i=0
d=0
lengths=[]
while i<len(p)-1:
box = bezmisc.pointdistance(p[i ][1],p[i ][2])
box += bezmisc.pointdistance(p[i ][2],p[i+1][0])
box += bezmisc.pointdistance(p[i+1][0],p[i+1][1])
chord = bezmisc.pointdistance(p[i][1], p[i+1][1])
if (box - chord) > tolerance:
b1, b2 = bezmisc.beziersplitatt([p[i][1],p[i][2],p[i+1][0],p[i+1][1]], 0.5)
p[i ][2][0],p[i ][2][1]=b1[1]
p[i+1][0][0],p[i+1][0][1]=b2[2]
p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])
else:
d=(box+chord)/2
lengths.append(d)
i+=1
new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]
new.append(p[-1][1])
lengths=[l for l in lengths if l>zero]
return(new,lengths)
def addDot(self,idPoint,labelPoint,diametre,typepoint, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M dia,0 A dia,dia 0 0 1 0,dia dia,dia 0 0 1 -dia,0 dia,dia 0 0 1 0,-dia dia,dia 0 0 1 dia,0 Z'
ligneH='M 0,0 H dia'
ligneV='M 0,0 V dia'
rayon=ligneH.replace('dia',str(self.unittouu(diametre))) #valeur par defaut.
if typepoint=="LigneV":
rayon=ligneV.replace('dia',str(self.unittouu(diametre)))
if typepoint=="Cercle":
rayon=cercle.replace('dia',str(self.unittouu(diametre)/2))
dot.set('d',rayon)
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
def addMark(self,x,y,idPoint,labelPoint,diametre, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M 0,0 V dia'
rayon=cercle.replace('dia',str(self.unittouu(diametre)))
dot.set('d',rayon)
dot.set('x', str(x))
dot.set('y', str(y))
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
Style['stroke']= Couleur
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
return dot
def addText(self,x,y,text):
new = inkex.etree.Element(inkex.addNS('text','svg'))
new.set('style', "font-style:normal;font-weight:normal;font-size:10px;line-height:100%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")#simplestyle.formatStyle(s))
new.set('x', str(x))
new.set('y', str(y))
new.text = str(text)
self.current_layer.append(new)
return new
class Pointsellier(pathmodifier.Diffeo):
def __init__(self):
pathmodifier.Diffeo.__init__(self)
self.OptionParser.add_option("--title")
self.OptionParser.add_option("--diamlong",
action="store", type="string",
dest="diamlong", default="1.0mm")
self.OptionParser.add_option("--typePoint",
action="store", type="string",
dest="typePoint", default="LigneH")
self.OptionParser.add_option("--textInfos",
action="store", type="inkbool",
dest="textInfos", default=False)
self.OptionParser.add_option("-t", "--toffset",
action="store", type="string",
dest="toffset", default="0.1mm")
self.OptionParser.add_option("-p", "--space",
action="store", type="string",
dest="space", default="3.0mm")
self.OptionParser.add_option("--autoOffset",
action="store", type="inkbool",
dest="autoOffset", default=False)
self.OptionParser.add_option("-r","--nrepeat",
action="store", type="int",
dest="nrepeat", default=1,help="nombre d'objets")
self.OptionParser.add_option("--autoRepeat",
action="store", type="inkbool",
dest="autoRepeat", default=False)
self.OptionParser.add_option("--autoMask",
action="store", type="inkbool",
dest="autoMask", default=False)
self.OptionParser.add_option("--autoMark",
action="store", type="inkbool",
dest="autoMark", default=False)
self.OptionParser.add_option("--typeMark",
action="store", type="string",
dest="typeMark", default="markX")
self.OptionParser.add_option( "--nrepeat2",
action="store", type="int",
dest="nrepeat2", default=1,help="nombre d'objets")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
def lengthtotime(self,l):
'''
Recieves an arc length l, and returns the index of the segment in self.skelcomp
containing the coresponding point, to gether with the position of the point on this segment.
If the deformer is closed, do computations modulo the toal length.
'''
if self.skelcompIsClosed:
l=l % sum(self.lengths)
if l<=0:
return 0,l/self.lengths[0]
i=0
while (i<len(self.lengths)) and (self.lengths[i]<=l):
l-=self.lengths[i]
i+=1
t=l/self.lengths[min(i,len(self.lengths)-1)]
return i, t
def applyDiffeo(self,bpt,vects=()):
'''
The kernel of this stuff:
bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.
'''
s=bpt[0]-self.skelcomp[0][0]
i,t=self.lengthtotime(s)
if i==len(self.skelcomp)-1:#je regarde si je suis au debut du skelete car sinon j'ai pas de vecteur
x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)
dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]
dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]
else:
x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)
dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]
dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
vx=0
vy=bpt[1]-self.skelcomp[0][1]
bpt[0]=x+vx*dx-vy*dy
bpt[1]=y+vx*dy+vy*dx
for v in vects:
vx=v[0]-self.skelcomp[0][0]-s
vy=v[1]-self.skelcomp[0][1]
v[0]=x+vx*dx-vy*dy
v[1]=y+vx*dy+vy*dx
def effect(self): | if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le chemin selectionner)
self.skeletons=self.selected
self.expandGroupsUnlinkClones(self.skeletons, True, False)
self.objectsToPaths(self.skeletons)
for skelnode in self.skeletons.itervalues(): #calcul de la longeur du chemin
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
self.skelcomp,self.lengths=linearize(comp)
longeur=sum(self.lengths)
distance=self.unittouu(self.options.space)
taille= self.unittouu(self.options.diamlong)
MaxCopies=max(1,int(round((longeur+distance)/distance)))
NbCopies= self.options.nrepeat #nombre de copie desirer a integrer dans les choix a modifier pour ne pas depasser les valeurs maxi
if NbCopies > MaxCopies:
NbCopies=MaxCopies #on limitte le nombre de copie au maxi possible sur le chemin
if self.options.autoRepeat: #gestion du calcul auto
NbCopies=MaxCopies
if self.options.autoOffset: #gestion du decallage automatique
tOffset=((longeur-(NbCopies-1)*distance)/2)-taille/2
else:
tOffset=self.unittouu(self.options.toffset)
#gestion du paterns
labelpoint='Point: '+ idpoint+ ' Nbr:' + str(NbCopies)+' longueur:'+str(round(self.uutounit(longeur,'mm'),2))+'mm'
addDot(self,idpoint,labelpoint,self.options.diamlong,self.options.typePoint,0)#creation du cercle de base
self.patterns={idpoint:self.getElementById(idpoint)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patterns.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patterns.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
xoffset=self.skelcomp[0][0]-bbox[0]+tOffset
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
if self.options.textInfos:
addText(self,xoffset,yoffset,labelpoint)
width=distance*NbCopies
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbCopies,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.")
if self.options.autoMark:
if self.options.typeMark=="markFraction":
Fraction= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
distance=(width)/Fraction #distance inter point
NbrMark=max(1,int(round((width+distance)/distance)))
infos= " Marquage 1/"+ str(Fraction)
couleur= '#ff0000'
else:
Repeat= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
NbrMark=max(1,int(round((NbCopies/Repeat))))
distance=distance*Repeat #distance inter point
infos=" Marquage tous les " + str(Repeat) + " points"
couleur= '#ffaa00'
labelMark="Mark: "+idpoint + infos
addMark(self,0,0,idpointMark,labelMark,self.options.diamlong,couleur)
self.patternsMark={idpointMark:self.getElementById(idpointMark)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patternsMark.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patternsMark.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
# a tester si les point au dessus sont utilisable pour positionner les autres a upoi ressemble skelcomp ??
xoffset=self.skelcomp[0][0]-bbox[0] +tOffset+taille/2
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
width=distance*NbrMark
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbrMark,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.")
if __name__ == '__main__':
e = Pointsellier()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99 | random_line_split | |
ps_couture.py | #!/usr/bin/env python
'''
Copyright (C) 2006 Jean-Francois Barraud, barraud@math.univ-lille1.fr
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
barraud@math.univ-lille1.fr
Quick description:
This script deforms an object (the pattern) along other paths (skeletons)...
The first selected object is the pattern
the last selected ones are the skeletons.
Imagine a straight horizontal line L in the middle of the bounding box of the pattern.
Consider the normal bundle of L: the collection of all the vertical lines meeting L.
Consider this as the initial state of the plane; in particular, think of the pattern
as painted on these lines.
Now move and bend L to make it fit a skeleton, and see what happens to the normals:
they move and rotate, deforming the pattern.
Mod pour creation de point couture par Vantieghem David 2018-2019
'''
# standard library
import copy
import math
import re
import random
import simplestyle
# local library
import inkex
import cubicsuperpath
import bezmisc
import pathmodifier
import simpletransform
inkex.localize()
def offset(pathcomp,dx,dy):
for ctl in pathcomp:
for pt in ctl:
pt[0]+=dx
pt[1]+=dy
def linearize(p,tolerance=0.001):
'''
This function recieves a component of a 'cubicsuperpath' and returns two things:
The path subdivided in many straight segments, and an array containing the length of each segment.
We could work with bezier path as well, but bezier arc lengths are (re)computed for each point
in the deformed object. For complex paths, this might take a while.
'''
zero=0.000001
i=0
d=0
lengths=[]
while i<len(p)-1:
box = bezmisc.pointdistance(p[i ][1],p[i ][2])
box += bezmisc.pointdistance(p[i ][2],p[i+1][0])
box += bezmisc.pointdistance(p[i+1][0],p[i+1][1])
chord = bezmisc.pointdistance(p[i][1], p[i+1][1])
if (box - chord) > tolerance:
b1, b2 = bezmisc.beziersplitatt([p[i][1],p[i][2],p[i+1][0],p[i+1][1]], 0.5)
p[i ][2][0],p[i ][2][1]=b1[1]
p[i+1][0][0],p[i+1][0][1]=b2[2]
p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])
else:
d=(box+chord)/2
lengths.append(d)
i+=1
new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]
new.append(p[-1][1])
lengths=[l for l in lengths if l>zero]
return(new,lengths)
def addDot(self,idPoint,labelPoint,diametre,typepoint, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M dia,0 A dia,dia 0 0 1 0,dia dia,dia 0 0 1 -dia,0 dia,dia 0 0 1 0,-dia dia,dia 0 0 1 dia,0 Z'
ligneH='M 0,0 H dia'
ligneV='M 0,0 V dia'
rayon=ligneH.replace('dia',str(self.unittouu(diametre))) #valeur par defaut.
if typepoint=="LigneV":
rayon=ligneV.replace('dia',str(self.unittouu(diametre)))
if typepoint=="Cercle":
rayon=cercle.replace('dia',str(self.unittouu(diametre)/2))
dot.set('d',rayon)
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
def addMark(self,x,y,idPoint,labelPoint,diametre, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M 0,0 V dia'
rayon=cercle.replace('dia',str(self.unittouu(diametre)))
dot.set('d',rayon)
dot.set('x', str(x))
dot.set('y', str(y))
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
Style['stroke']= Couleur
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
return dot
def addText(self,x,y,text):
new = inkex.etree.Element(inkex.addNS('text','svg'))
new.set('style', "font-style:normal;font-weight:normal;font-size:10px;line-height:100%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")#simplestyle.formatStyle(s))
new.set('x', str(x))
new.set('y', str(y))
new.text = str(text)
self.current_layer.append(new)
return new
class Pointsellier(pathmodifier.Diffeo):
def __init__(self):
pathmodifier.Diffeo.__init__(self)
self.OptionParser.add_option("--title")
self.OptionParser.add_option("--diamlong",
action="store", type="string",
dest="diamlong", default="1.0mm")
self.OptionParser.add_option("--typePoint",
action="store", type="string",
dest="typePoint", default="LigneH")
self.OptionParser.add_option("--textInfos",
action="store", type="inkbool",
dest="textInfos", default=False)
self.OptionParser.add_option("-t", "--toffset",
action="store", type="string",
dest="toffset", default="0.1mm")
self.OptionParser.add_option("-p", "--space",
action="store", type="string",
dest="space", default="3.0mm")
self.OptionParser.add_option("--autoOffset",
action="store", type="inkbool",
dest="autoOffset", default=False)
self.OptionParser.add_option("-r","--nrepeat",
action="store", type="int",
dest="nrepeat", default=1,help="nombre d'objets")
self.OptionParser.add_option("--autoRepeat",
action="store", type="inkbool",
dest="autoRepeat", default=False)
self.OptionParser.add_option("--autoMask",
action="store", type="inkbool",
dest="autoMask", default=False)
self.OptionParser.add_option("--autoMark",
action="store", type="inkbool",
dest="autoMark", default=False)
self.OptionParser.add_option("--typeMark",
action="store", type="string",
dest="typeMark", default="markX")
self.OptionParser.add_option( "--nrepeat2",
action="store", type="int",
dest="nrepeat2", default=1,help="nombre d'objets")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
def | (self,l):
'''
Recieves an arc length l, and returns the index of the segment in self.skelcomp
containing the coresponding point, to gether with the position of the point on this segment.
If the deformer is closed, do computations modulo the toal length.
'''
if self.skelcompIsClosed:
l=l % sum(self.lengths)
if l<=0:
return 0,l/self.lengths[0]
i=0
while (i<len(self.lengths)) and (self.lengths[i]<=l):
l-=self.lengths[i]
i+=1
t=l/self.lengths[min(i,len(self.lengths)-1)]
return i, t
def applyDiffeo(self,bpt,vects=()):
'''
The kernel of this stuff:
bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.
'''
s=bpt[0]-self.skelcomp[0][0]
i,t=self.lengthtotime(s)
if i==len(self.skelcomp)-1:#je regarde si je suis au debut du skelete car sinon j'ai pas de vecteur
x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)
dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]
dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]
else:
x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)
dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]
dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
vx=0
vy=bpt[1]-self.skelcomp[0][1]
bpt[0]=x+vx*dx-vy*dy
bpt[1]=y+vx*dy+vy*dx
for v in vects:
vx=v[0]-self.skelcomp[0][0]-s
vy=v[1]-self.skelcomp[0][1]
v[0]=x+vx*dx-vy*dy
v[1]=y+vx*dy+vy*dx
def effect(self):
if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le chemin selectionner)
self.skeletons=self.selected
self.expandGroupsUnlinkClones(self.skeletons, True, False)
self.objectsToPaths(self.skeletons)
for skelnode in self.skeletons.itervalues(): #calcul de la longeur du chemin
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
self.skelcomp,self.lengths=linearize(comp)
longeur=sum(self.lengths)
distance=self.unittouu(self.options.space)
taille= self.unittouu(self.options.diamlong)
MaxCopies=max(1,int(round((longeur+distance)/distance)))
NbCopies= self.options.nrepeat #nombre de copie desirer a integrer dans les choix a modifier pour ne pas depasser les valeurs maxi
if NbCopies > MaxCopies:
NbCopies=MaxCopies #on limitte le nombre de copie au maxi possible sur le chemin
if self.options.autoRepeat: #gestion du calcul auto
NbCopies=MaxCopies
if self.options.autoOffset: #gestion du decallage automatique
tOffset=((longeur-(NbCopies-1)*distance)/2)-taille/2
else:
tOffset=self.unittouu(self.options.toffset)
#gestion du paterns
labelpoint='Point: '+ idpoint+ ' Nbr:' + str(NbCopies)+' longueur:'+str(round(self.uutounit(longeur,'mm'),2))+'mm'
addDot(self,idpoint,labelpoint,self.options.diamlong,self.options.typePoint,0)#creation du cercle de base
self.patterns={idpoint:self.getElementById(idpoint)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patterns.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patterns.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
xoffset=self.skelcomp[0][0]-bbox[0]+tOffset
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
if self.options.textInfos:
addText(self,xoffset,yoffset,labelpoint)
width=distance*NbCopies
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbCopies,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.")
if self.options.autoMark:
if self.options.typeMark=="markFraction":
Fraction= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
distance=(width)/Fraction #distance inter point
NbrMark=max(1,int(round((width+distance)/distance)))
infos= " Marquage 1/"+ str(Fraction)
couleur= '#ff0000'
else:
Repeat= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
NbrMark=max(1,int(round((NbCopies/Repeat))))
distance=distance*Repeat #distance inter point
infos=" Marquage tous les " + str(Repeat) + " points"
couleur= '#ffaa00'
labelMark="Mark: "+idpoint + infos
addMark(self,0,0,idpointMark,labelMark,self.options.diamlong,couleur)
self.patternsMark={idpointMark:self.getElementById(idpointMark)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patternsMark.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patternsMark.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
# a tester si les point au dessus sont utilisable pour positionner les autres a upoi ressemble skelcomp ??
xoffset=self.skelcomp[0][0]-bbox[0] +tOffset+taille/2
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
width=distance*NbrMark
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbrMark,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.")
if __name__ == '__main__':
e = Pointsellier()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| lengthtotime | identifier_name |
ps_couture.py | #!/usr/bin/env python
'''
Copyright (C) 2006 Jean-Francois Barraud, barraud@math.univ-lille1.fr
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
barraud@math.univ-lille1.fr
Quick description:
This script deforms an object (the pattern) along other paths (skeletons)...
The first selected object is the pattern
the last selected ones are the skeletons.
Imagine a straight horizontal line L in the middle of the bounding box of the pattern.
Consider the normal bundle of L: the collection of all the vertical lines meeting L.
Consider this as the initial state of the plane; in particular, think of the pattern
as painted on these lines.
Now move and bend L to make it fit a skeleton, and see what happens to the normals:
they move and rotate, deforming the pattern.
Mod pour creation de point couture par Vantieghem David 2018-2019
'''
# standard library
import copy
import math
import re
import random
import simplestyle
# local library
import inkex
import cubicsuperpath
import bezmisc
import pathmodifier
import simpletransform
inkex.localize()
def offset(pathcomp,dx,dy):
for ctl in pathcomp:
for pt in ctl:
pt[0]+=dx
pt[1]+=dy
def linearize(p,tolerance=0.001):
'''
This function recieves a component of a 'cubicsuperpath' and returns two things:
The path subdivided in many straight segments, and an array containing the length of each segment.
We could work with bezier path as well, but bezier arc lengths are (re)computed for each point
in the deformed object. For complex paths, this might take a while.
'''
zero=0.000001
i=0
d=0
lengths=[]
while i<len(p)-1:
box = bezmisc.pointdistance(p[i ][1],p[i ][2])
box += bezmisc.pointdistance(p[i ][2],p[i+1][0])
box += bezmisc.pointdistance(p[i+1][0],p[i+1][1])
chord = bezmisc.pointdistance(p[i][1], p[i+1][1])
if (box - chord) > tolerance:
b1, b2 = bezmisc.beziersplitatt([p[i][1],p[i][2],p[i+1][0],p[i+1][1]], 0.5)
p[i ][2][0],p[i ][2][1]=b1[1]
p[i+1][0][0],p[i+1][0][1]=b2[2]
p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])
else:
d=(box+chord)/2
lengths.append(d)
i+=1
new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]
new.append(p[-1][1])
lengths=[l for l in lengths if l>zero]
return(new,lengths)
def addDot(self,idPoint,labelPoint,diametre,typepoint, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M dia,0 A dia,dia 0 0 1 0,dia dia,dia 0 0 1 -dia,0 dia,dia 0 0 1 0,-dia dia,dia 0 0 1 dia,0 Z'
ligneH='M 0,0 H dia'
ligneV='M 0,0 V dia'
rayon=ligneH.replace('dia',str(self.unittouu(diametre))) #valeur par defaut.
if typepoint=="LigneV":
rayon=ligneV.replace('dia',str(self.unittouu(diametre)))
if typepoint=="Cercle":
rayon=cercle.replace('dia',str(self.unittouu(diametre)/2))
dot.set('d',rayon)
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
def addMark(self,x,y,idPoint,labelPoint,diametre, Couleur):
    """Create an SVG <path> drawing a vertical tick mark of the given diameter
    at (x, y), stroked with `Couleur`; append it to the current layer and
    return the element."""
    dot = inkex.etree.Element(inkex.addNS('path','svg'))
    dot.set('id',idPoint)
    # 'dia' is a placeholder replaced by the diameter converted to user units.
    cercle='M 0,0 V dia'
    rayon=cercle.replace('dia',str(self.unittouu(diametre)))
    dot.set('d',rayon)
    dot.set('x', str(x))
    dot.set('y', str(y))
    Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
    Style['stroke']= Couleur
    dot.set('style', simplestyle.formatStyle(Style))
    dot.set(inkex.addNS('label','inkscape'), labelPoint)
    self.current_layer.append(dot)
    return dot
def addText(self,x,y,text):
    """Create an SVG <text> element at (x, y) with a fixed 10px sans-serif
    style, append it to the current layer and return it."""
    new = inkex.etree.Element(inkex.addNS('text','svg'))
    new.set('style', "font-style:normal;font-weight:normal;font-size:10px;line-height:100%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")#simplestyle.formatStyle(s))
    new.set('x', str(x))
    new.set('y', str(y))
    new.text = str(text)
    self.current_layer.append(new)
    return new
class Pointsellier(pathmodifier.Diffeo):
def __init__(self):
    """Register every UI option exposed by the extension's dialog (.inx)."""
    pathmodifier.Diffeo.__init__(self)
    self.OptionParser.add_option("--title")
    # Size and shape of the stitching dot.
    self.OptionParser.add_option("--diamlong",
                                 action="store", type="string",
                                 dest="diamlong", default="1.0mm")
    self.OptionParser.add_option("--typePoint",
                                 action="store", type="string",
                                 dest="typePoint", default="LigneH")
    # Whether to draw a text label with count/length information.
    self.OptionParser.add_option("--textInfos",
                                 action="store", type="inkbool",
                                 dest="textInfos", default=False)
    # Offset of the first dot along the path, and spacing between dots.
    self.OptionParser.add_option("-t", "--toffset",
                                 action="store", type="string",
                                 dest="toffset", default="0.1mm")
    self.OptionParser.add_option("-p", "--space",
                                 action="store", type="string",
                                 dest="space", default="3.0mm")
    # Center the run of dots on the path automatically.
    self.OptionParser.add_option("--autoOffset",
                                 action="store", type="inkbool",
                                 dest="autoOffset", default=False)
    # Number of copies; autoRepeat fills the whole path instead.
    self.OptionParser.add_option("-r","--nrepeat",
                                 action="store", type="int",
                                 dest="nrepeat", default=1,help="nombre d'objets")
    self.OptionParser.add_option("--autoRepeat",
                                 action="store", type="inkbool",
                                 dest="autoRepeat", default=False)
    # Hide the skeleton path after stitching.
    self.OptionParser.add_option("--autoMask",
                                 action="store", type="inkbool",
                                 dest="autoMask", default=False)
    # Optional extra marks layered on top of the dots.
    self.OptionParser.add_option("--autoMark",
                                 action="store", type="inkbool",
                                 dest="autoMark", default=False)
    self.OptionParser.add_option("--typeMark",
                                 action="store", type="string",
                                 dest="typeMark", default="markX")
    self.OptionParser.add_option( "--nrepeat2",
                                 action="store", type="int",
                                 dest="nrepeat2", default=1,help="nombre d'objets")
    self.OptionParser.add_option("--tab",
                                 action="store", type="string",
                                 dest="tab",
                                 help="The selected UI-tab when OK was pressed")
def lengthtotime(self,l):
    '''
    Receives an arc length l, and returns the index of the segment in self.skelcomp
    containing the corresponding point, together with the position of the point on
    this segment.

    If the deformer is closed, do computations modulo the total length.
    '''
    if self.skelcompIsClosed:
        l=l % sum(self.lengths)
    if l<=0:
        # Before the first node: express l as a (possibly negative) fraction
        # of the first segment.
        return 0,l/self.lengths[0]
    i=0
    while (i<len(self.lengths)) and (self.lengths[i]<=l):
        l-=self.lengths[i]
        i+=1
    # Clamp the index so a point past the last node still uses the final segment.
    t=l/self.lengths[min(i,len(self.lengths)-1)]
    return i, t
def applyDiffeo(self,bpt,vects=()):
    '''
    The kernel of this stuff:
    bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.

    Maps bpt (and the attached control points in vects) from the pattern's
    straight-line frame onto the skeleton path, in place.
    '''
    s=bpt[0]-self.skelcomp[0][0]
    i,t=self.lengthtotime(s)
    if i==len(self.skelcomp)-1: # past the last node: extrapolate from the final segment
        x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)
        dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]
        dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]
    else:
        x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)
        dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]
        dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
    # (dx, dy) is the unit tangent of the segment; rotate the vertical offset
    # vy into the path's local frame.
    vx=0
    vy=bpt[1]-self.skelcomp[0][1]
    bpt[0]=x+vx*dx-vy*dy
    bpt[1]=y+vx*dy+vy*dx
    for v in vects:
        vx=v[0]-self.skelcomp[0][0]-s
        vy=v[1]-self.skelcomp[0][1]
        v[0]=x+vx*dx-vy*dy
        v[1]=y+vx*dy+vy*dx
def effect(self):
if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le chemin selectionner)
self.skeletons=self.selected
self.expandGroupsUnlinkClones(self.skeletons, True, False)
self.objectsToPaths(self.skeletons)
for skelnode in self.skeletons.itervalues(): #calcul de la longeur du chemin
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
self.skelcomp,self.lengths=linearize(comp)
longeur=sum(self.lengths)
distance=self.unittouu(self.options.space)
taille= self.unittouu(self.options.diamlong)
MaxCopies=max(1,int(round((longeur+distance)/distance)))
NbCopies= self.options.nrepeat #nombre de copie desirer a integrer dans les choix a modifier pour ne pas depasser les valeurs maxi
if NbCopies > MaxCopies:
NbCopies=MaxCopies #on limitte le nombre de copie au maxi possible sur le chemin
if self.options.autoRepeat: #gestion du calcul auto
NbCopies=MaxCopies
if self.options.autoOffset: #gestion du decallage automatique
tOffset=((longeur-(NbCopies-1)*distance)/2)-taille/2
else:
tOffset=self.unittouu(self.options.toffset)
#gestion du paterns
labelpoint='Point: '+ idpoint+ ' Nbr:' + str(NbCopies)+' longueur:'+str(round(self.uutounit(longeur,'mm'),2))+'mm'
addDot(self,idpoint,labelpoint,self.options.diamlong,self.options.typePoint,0)#creation du cercle de base
self.patterns={idpoint:self.getElementById(idpoint)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patterns.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patterns.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
xoffset=self.skelcomp[0][0]-bbox[0]+tOffset
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
if self.options.textInfos:
addText(self,xoffset,yoffset,labelpoint)
width=distance*NbCopies
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbCopies,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.")
if self.options.autoMark:
if self.options.typeMark=="markFraction":
Fraction= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
distance=(width)/Fraction #distance inter point
NbrMark=max(1,int(round((width+distance)/distance)))
infos= " Marquage 1/"+ str(Fraction)
couleur= '#ff0000'
else:
Repeat= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
NbrMark=max(1,int(round((NbCopies/Repeat))))
distance=distance*Repeat #distance inter point
infos=" Marquage tous les " + str(Repeat) + " points"
couleur= '#ffaa00'
labelMark="Mark: "+idpoint + infos
addMark(self,0,0,idpointMark,labelMark,self.options.diamlong,couleur)
self.patternsMark={idpointMark:self.getElementById(idpointMark)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patternsMark.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patternsMark.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
# a tester si les point au dessus sont utilisable pour positionner les autres a upoi ressemble skelcomp ??
xoffset=self.skelcomp[0][0]-bbox[0] +tOffset+taille/2
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
width=distance*NbrMark
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbrMark,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
|
if __name__ == '__main__':
e = Pointsellier()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| inkex.errormsg("This extension need a path, not groups.") | conditional_block |
ps_couture.py | #!/usr/bin/env python
'''
Copyright (C) 2006 Jean-Francois Barraud, barraud@math.univ-lille1.fr
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
barraud@math.univ-lille1.fr
Quick description:
This script deforms an object (the pattern) along other paths (skeletons)...
The first selected object is the pattern
the last selected ones are the skeletons.
Imagine a straight horizontal line L in the middle of the bounding box of the pattern.
Consider the normal bundle of L: the collection of all the vertical lines meeting L.
Consider this as the initial state of the plane; in particular, think of the pattern
as painted on these lines.
Now move and bend L to make it fit a skeleton, and see what happens to the normals:
they move and rotate, deforming the pattern.
Mod pour creation de point couture par Vantieghem David 2018-2019
'''
# standard library
import copy
import math
import re
import random
import simplestyle
# local library
import inkex
import cubicsuperpath
import bezmisc
import pathmodifier
import simpletransform
inkex.localize()
def offset(pathcomp, dx, dy):
    """Translate every control point of a cubicsuperpath component by (dx, dy), in place."""
    for control_triplet in pathcomp:
        for point in control_triplet:
            point[0] += dx
            point[1] += dy
def linearize(p,tolerance=0.001):
    '''
    This function receives a component of a 'cubicsuperpath' and returns two things:
    The path subdivided in many straight segments, and an array containing the length of each segment.

    We could work with bezier path as well, but bezier arc lengths are (re)computed for each point
    in the deformed object. For complex paths, this might take a while.

    NOTE: p is subdivided in place.  Returns (points, lengths) with
    near-zero-length segments (< 1e-6) dropped.
    '''
    zero=0.000001
    i=0
    d=0
    lengths=[]
    while i<len(p)-1:
        # Control-polygon length ("box") vs. chord length: both converge to the
        # true arc length as the bezier segment becomes flat.
        box = bezmisc.pointdistance(p[i ][1],p[i ][2])
        box += bezmisc.pointdistance(p[i ][2],p[i+1][0])
        box += bezmisc.pointdistance(p[i+1][0],p[i+1][1])
        chord = bezmisc.pointdistance(p[i][1], p[i+1][1])
        if (box - chord) > tolerance:
            # Not flat enough yet: split the segment at t=0.5 and retry it.
            b1, b2 = bezmisc.beziersplitatt([p[i][1],p[i][2],p[i+1][0],p[i+1][1]], 0.5)
            p[i ][2][0],p[i ][2][1]=b1[1]
            p[i+1][0][0],p[i+1][0][1]=b2[2]
            p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])
        else:
            # Flat enough: approximate the arc length by the mean of box and chord.
            d=(box+chord)/2
            lengths.append(d)
            i+=1
    # Keep only the anchor points of segments with non-negligible length.
    new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]
    new.append(p[-1][1])
    lengths=[l for l in lengths if l>zero]
    return(new,lengths)
def addDot(self,idPoint,labelPoint,diametre,typepoint, Couleur):
    """Create an SVG <path> drawing one pattern dot (circle, horizontal or
    vertical tick) of the given diameter and append it to the current layer.

    Note: unlike addMark, the element is not returned, and `Couleur` is
    unused here (the stroke stays black).
    """
    dot = inkex.etree.Element(inkex.addNS('path','svg'))
    dot.set('id',idPoint)
    # Path templates; 'dia' is a placeholder for the size in user units.
    cercle='M dia,0 A dia,dia 0 0 1 0,dia dia,dia 0 0 1 -dia,0 dia,dia 0 0 1 0,-dia dia,dia 0 0 1 dia,0 Z'
    ligneH='M 0,0 H dia'
    ligneV='M 0,0 V dia'
    rayon=ligneH.replace('dia',str(self.unittouu(diametre))) # horizontal tick is the default
    if typepoint=="LigneV":
        rayon=ligneV.replace('dia',str(self.unittouu(diametre)))
    if typepoint=="Cercle":
        rayon=cercle.replace('dia',str(self.unittouu(diametre)/2)) # circle template expects the radius
    dot.set('d',rayon)
    Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
    dot.set('style', simplestyle.formatStyle(Style))
    dot.set(inkex.addNS('label','inkscape'), labelPoint)
    self.current_layer.append(dot)
def addMark(self,x,y,idPoint,labelPoint,diametre, Couleur):
    """Create an SVG <path> drawing a vertical tick mark of the given diameter
    at (x, y), stroked with `Couleur`; append it to the current layer and
    return the element."""
    dot = inkex.etree.Element(inkex.addNS('path','svg'))
    dot.set('id',idPoint)
    # 'dia' is a placeholder replaced by the diameter converted to user units.
    cercle='M 0,0 V dia'
    rayon=cercle.replace('dia',str(self.unittouu(diametre)))
    dot.set('d',rayon)
    dot.set('x', str(x))
    dot.set('y', str(y))
    Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
    Style['stroke']= Couleur
    dot.set('style', simplestyle.formatStyle(Style))
    dot.set(inkex.addNS('label','inkscape'), labelPoint)
    self.current_layer.append(dot)
    return dot
def addText(self,x,y,text):
    """Create an SVG <text> element at (x, y) with a fixed 10px sans-serif
    style, append it to the current layer and return it."""
    new = inkex.etree.Element(inkex.addNS('text','svg'))
    new.set('style', "font-style:normal;font-weight:normal;font-size:10px;line-height:100%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")#simplestyle.formatStyle(s))
    new.set('x', str(x))
    new.set('y', str(y))
    new.text = str(text)
    self.current_layer.append(new)
    return new
class Pointsellier(pathmodifier.Diffeo):
|
if __name__ == '__main__':
e = Pointsellier()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| def __init__(self):
pathmodifier.Diffeo.__init__(self)
self.OptionParser.add_option("--title")
self.OptionParser.add_option("--diamlong",
action="store", type="string",
dest="diamlong", default="1.0mm")
self.OptionParser.add_option("--typePoint",
action="store", type="string",
dest="typePoint", default="LigneH")
self.OptionParser.add_option("--textInfos",
action="store", type="inkbool",
dest="textInfos", default=False)
self.OptionParser.add_option("-t", "--toffset",
action="store", type="string",
dest="toffset", default="0.1mm")
self.OptionParser.add_option("-p", "--space",
action="store", type="string",
dest="space", default="3.0mm")
self.OptionParser.add_option("--autoOffset",
action="store", type="inkbool",
dest="autoOffset", default=False)
self.OptionParser.add_option("-r","--nrepeat",
action="store", type="int",
dest="nrepeat", default=1,help="nombre d'objets")
self.OptionParser.add_option("--autoRepeat",
action="store", type="inkbool",
dest="autoRepeat", default=False)
self.OptionParser.add_option("--autoMask",
action="store", type="inkbool",
dest="autoMask", default=False)
self.OptionParser.add_option("--autoMark",
action="store", type="inkbool",
dest="autoMark", default=False)
self.OptionParser.add_option("--typeMark",
action="store", type="string",
dest="typeMark", default="markX")
self.OptionParser.add_option( "--nrepeat2",
action="store", type="int",
dest="nrepeat2", default=1,help="nombre d'objets")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
def lengthtotime(self,l):
    '''
    Receives an arc length l, and returns the index of the segment in self.skelcomp
    containing the corresponding point, together with the position of the point on
    this segment.

    If the deformer is closed, do computations modulo the total length.
    '''
    if self.skelcompIsClosed:
        l=l % sum(self.lengths)
    if l<=0:
        # Before the first node: express l as a (possibly negative) fraction
        # of the first segment.
        return 0,l/self.lengths[0]
    i=0
    while (i<len(self.lengths)) and (self.lengths[i]<=l):
        l-=self.lengths[i]
        i+=1
    # Clamp the index so a point past the last node still uses the final segment.
    t=l/self.lengths[min(i,len(self.lengths)-1)]
    return i, t
def applyDiffeo(self,bpt,vects=()):
    '''
    The kernel of this stuff:
    bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.

    Maps bpt (and the attached control points in vects) from the pattern's
    straight-line frame onto the skeleton path, in place.
    '''
    s=bpt[0]-self.skelcomp[0][0]
    i,t=self.lengthtotime(s)
    if i==len(self.skelcomp)-1: # past the last node: extrapolate from the final segment
        x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)
        dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]
        dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]
    else:
        x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)
        dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]
        dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
    # (dx, dy) is the unit tangent of the segment; rotate the vertical offset
    # vy into the path's local frame.
    vx=0
    vy=bpt[1]-self.skelcomp[0][1]
    bpt[0]=x+vx*dx-vy*dy
    bpt[1]=y+vx*dy+vy*dx
    for v in vects:
        vx=v[0]-self.skelcomp[0][0]-s
        vy=v[1]-self.skelcomp[0][1]
        v[0]=x+vx*dx-vy*dy
        v[1]=y+vx*dy+vy*dx
def effect(self):
if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le chemin selectionner)
self.skeletons=self.selected
self.expandGroupsUnlinkClones(self.skeletons, True, False)
self.objectsToPaths(self.skeletons)
for skelnode in self.skeletons.itervalues(): #calcul de la longeur du chemin
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
self.skelcomp,self.lengths=linearize(comp)
longeur=sum(self.lengths)
distance=self.unittouu(self.options.space)
taille= self.unittouu(self.options.diamlong)
MaxCopies=max(1,int(round((longeur+distance)/distance)))
NbCopies= self.options.nrepeat #nombre de copie desirer a integrer dans les choix a modifier pour ne pas depasser les valeurs maxi
if NbCopies > MaxCopies:
NbCopies=MaxCopies #on limitte le nombre de copie au maxi possible sur le chemin
if self.options.autoRepeat: #gestion du calcul auto
NbCopies=MaxCopies
if self.options.autoOffset: #gestion du decallage automatique
tOffset=((longeur-(NbCopies-1)*distance)/2)-taille/2
else:
tOffset=self.unittouu(self.options.toffset)
#gestion du paterns
labelpoint='Point: '+ idpoint+ ' Nbr:' + str(NbCopies)+' longueur:'+str(round(self.uutounit(longeur,'mm'),2))+'mm'
addDot(self,idpoint,labelpoint,self.options.diamlong,self.options.typePoint,0)#creation du cercle de base
self.patterns={idpoint:self.getElementById(idpoint)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patterns.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patterns.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
xoffset=self.skelcomp[0][0]-bbox[0]+tOffset
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
if self.options.textInfos:
addText(self,xoffset,yoffset,labelpoint)
width=distance*NbCopies
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbCopies,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.")
if self.options.autoMark:
if self.options.typeMark=="markFraction":
Fraction= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
distance=(width)/Fraction #distance inter point
NbrMark=max(1,int(round((width+distance)/distance)))
infos= " Marquage 1/"+ str(Fraction)
couleur= '#ff0000'
else:
Repeat= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
NbrMark=max(1,int(round((NbCopies/Repeat))))
distance=distance*Repeat #distance inter point
infos=" Marquage tous les " + str(Repeat) + " points"
couleur= '#ffaa00'
labelMark="Mark: "+idpoint + infos
addMark(self,0,0,idpointMark,labelMark,self.options.diamlong,couleur)
self.patternsMark={idpointMark:self.getElementById(idpointMark)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patternsMark.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patternsMark.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
# a tester si les point au dessus sont utilisable pour positionner les autres a upoi ressemble skelcomp ??
xoffset=self.skelcomp[0][0]-bbox[0] +tOffset+taille/2
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
width=distance*NbrMark
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbrMark,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.") | identifier_body |
utils.py | import numpy as np
import tensorflow as tf
from collections import OrderedDict
import nltk
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from tensorflow.python import pywrap_tensorflow
from pdb import set_trace as bp
import data_utils as dp
import data_utils
import sys, os
from tensorflow.python.ops import clip_ops
from tensorflow.python.framework import ops
from collections import defaultdict
import codecs
import cPickle
from tensorflow.python.ops import math_ops, variable_scope
from embedding_metrics import greedy_match, extrema_score, average_score
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU, written as the branch-free form f1*x + f2*|x|.

    Equals x for x >= 0 and leak*x for x < 0.
    """
    with tf.variable_scope(name):
        pos_coeff = 0.5 * (1 + leak)
        neg_coeff = 0.5 * (1 - leak)
        return pos_coeff * x + neg_coeff * tf.abs(x)
def | (text, wordtoix, opt, is_cnn = True):
sent = [wordtoix[x] for x in text.split()]
return prepare_data_for_cnn([sent for i in range(opt.batch_size)], opt)
def prepare_data_for_cnn(seqs_x, opt):
    """Truncate token sequences to opt.maxlen and zero-pad them into an int32
    matrix for the CNN encoder.

    Each row is: (opt.filter_shape - 1) leading zeros + tokens + trailing
    zeros, for a total width of maxlen + 2*(filter_shape-1).
    Returns (None, None) when every sequence was filtered out.
    """
    maxlen=opt.maxlen
    filter_h=opt.filter_shape
    lengths_x = [len(s) for s in seqs_x]
    # print lengths_x
    if maxlen != None:
        new_seqs_x = []
        new_lengths_x = []
        for l_x, s_x in zip(lengths_x, seqs_x):
            if l_x < maxlen:
                new_seqs_x.append(s_x)
                new_lengths_x.append(l_x)
            else:
                # Too long: keep the last maxlen-1 tokens.
                new_seqs_x.append(s_x[l_x-maxlen+1:])
                new_lengths_x.append(maxlen-1)
        lengths_x = new_lengths_x
        seqs_x = new_seqs_x
        if len(lengths_x) < 1 :
            return None, None
    # Symmetric zero padding so convolution filters can cover the borders.
    pad = filter_h -1
    x = []
    for rev in seqs_x:
        xx = []
        for i in xrange(pad):
            xx.append(0)
        for idx in rev:
            xx.append(idx)
        while len(xx) < maxlen + 2*pad:
            xx.append(0)
        x.append(xx)
    x = np.array(x,dtype='int32')
    return x
def prepare_data_for_rnn(seqs_x, opt, is_add_GO = True):
    """Truncate and zero-pad token sequences into an int32 matrix of shape
    (n_samples, opt.sent_len) for the RNN decoder.

    Sequences of length >= opt.sent_len - 4 are cut to opt.sent_len - 4 tokens
    and terminated with token 2 (EOS).  When is_add_GO is True, each row
    starts with token 1 (GO) followed by the sequence.
    Returns (None, None) for an empty batch.
    """
    limit = opt.sent_len - 2  # working cap derived from the output width
    if limit is not None:
        # Truncate over-long sequences and append the EOS token (2).
        seqs_x = [s if len(s) < limit - 2 else s[:limit - 2] + [2] for s in seqs_x]
    lengths = [len(s) for s in seqs_x]
    if not lengths:
        return None, None
    batch = np.zeros((len(seqs_x), opt.sent_len)).astype('int32')
    for row, seq in enumerate(seqs_x):
        if is_add_GO:
            batch[row, 0] = 1  # GO symbol
            batch[row, 1:len(seq) + 1] = seq
        else:
            batch[row, :len(seq)] = seq
    return batch
def restore_from_save(t_vars, sess, opt, prefix = 'd_', load_path = None):
    """Restore the variables in t_vars from a TensorFlow checkpoint, skipping
    any whose shape does not match the saved tensor.

    When opt.load_from_pretrain is set, checkpoint names are expected to carry
    `prefix` in front of the variable name; otherwise names are matched
    verbatim.  Prints which variables were loaded / skipped.
    """
    if not load_path:
        load_path = opt.load_path
    if opt.load_from_pretrain:
        save_keys = tensors_key_in_file(load_path)
        #print(save_keys.keys())
        # NOTE(review): var.name[2:][:-2] strips a 2-character scope prefix and
        # the trailing ':0' -- assumes every variable lives under a 2-char
        # scope; confirm against the model's variable naming.
        ss = set([var.name[2:][:-2] for var in t_vars])&set([s[2:] for s in save_keys.keys()])
        cc = {var.name[2:][:-2]:var for var in t_vars}
        ss_right_shape = set([s for s in ss if cc[s].get_shape() == save_keys[prefix+s]]) # only restore variables with correct shape
        ss_wrong_shape = ss - ss_right_shape
        cc2 = {prefix+ var.name[2:][:-2]:var for var in t_vars if var.name[2:][:-2] in ss_right_shape} # name in file -> var
        loader = tf.train.Saver(var_list=cc2)
        loader.restore(sess, load_path)
        print("Loading variables from '%s'." % load_path)
        print("Loaded variables:"+str(ss_right_shape))
        print("Mis-shaped variables:"+str(ss_wrong_shape))
    else:
        save_keys = tensors_key_in_file(load_path)
        # Verbatim name matching: var.name[:-2] drops only the ':0' suffix.
        ss = [var for var in t_vars if var.name[:-2] in save_keys.keys()]
        ss_right_shape = [var.name for var in ss if var.get_shape() == save_keys[var.name[:-2]]]
        ss_wrong_shape = set([v.name for v in ss]) - set(ss_right_shape)
        #ss = [var for var in ss if 'OptimizeLoss' not in var]
        loader = tf.train.Saver(var_list= [var for var in t_vars if var.name in ss_right_shape])
        loader.restore(sess, load_path)
        print("Loading variables from '%s'." % load_path)
        print("Loaded variables:"+str(ss_right_shape))
        print("Mis-shaped variables:"+str(ss_wrong_shape))
# Single bucket: source and target sentences of up to 60 tokens each.
_buckets = [(60,60)]
def read_data(source_path, target_path, opt):
    """
    From tensorflow tutorial translate.py
    Read data from source and target files and put into buckets.
    Args:
        source_path: path to the files with token-ids for the source language.
        target_path: path to the file with token-ids for the target language;
            it must be aligned with the source file: n-th line contains the desired
            output for n-th line from the source_path.
        opt: options object; opt.max_train_data_size caps the number of lines
            read (0/None = no limit), and opt.minlen/opt.maxlen bound the
            accepted sequence lengths.
    Returns:
        data_set: a list of length len(_buckets); data_set[n] contains a list of
            (source, target) pairs read from the provided data files that fit
            into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
            len(target) < _buckets[n][1]; source and target are lists of token-ids.
    """
    data_set = [[] for _ in _buckets]
    with tf.gfile.GFile(source_path, mode="r") as source_file:
        with tf.gfile.GFile(target_path, mode="r") as target_file:
            source, target = source_file.readline(), target_file.readline()
            counter = 0
            while source and target and (not opt.max_train_data_size or counter < opt.max_train_data_size):
                counter += 1
                if counter % 100000 == 0:
                    print(" reading data line %d" % counter)
                    sys.stdout.flush()
                source_ids = [int(x) for x in source.split()]
                target_ids = [int(x) for x in target.split()]
                target_ids.append(data_utils.EOS_ID)
                # Place the pair in the first bucket it fits, also enforcing
                # the opt.minlen/opt.maxlen limits on both sides.
                for bucket_id, (source_size, target_size) in enumerate(_buckets):
                    if opt.minlen <len(source_ids) < min(source_size, opt.maxlen) and opt.minlen <len(target_ids) < min(target_size, opt.maxlen):
                        data_set[bucket_id].append([source_ids, target_ids])
                        break
                source, target = source_file.readline(), target_file.readline()
    return data_set
def read_pair_data_full(src_f, tgt_f, dic_f, train_prop = 0.9, max_num=None, rev_src=False, rev_tgt = False, is_text_src = False, is_text_tgt = False, p_f = '../data/', from_p = True):
    """Read a parallel (source, target) corpus plus a dictionary file and split
    it into train/val/test.

    Returns (train, val, test, wordtoix, ixtoword); each split is an array of
    (target_tokens, source_tokens) pairs.  When from_p is True the result is
    cached to / loaded from a pickle file derived from src_f and max_num.
    """
    #train, val = [], []
    if from_p:
        p_f = src_f[:-3] + str(max_num) + '.p'
        if os.path.exists(p_f):
            with open(p_f, 'rb') as pfile:
                train, val, test, wordtoix, ixtoword = cPickle.load(pfile)
            return train, val, test, wordtoix, ixtoword
    # Dictionary: one token per line; line number is the token id.
    wordtoix, ixtoword = {}, {}
    print "Start reading dic file . . ."
    if os.path.exists(dic_f):
        print("loading Dictionary")
        counter=0
        with codecs.open(dic_f,"r",'utf-8') as f:
            s=f.readline()
            while s:
                s=s.rstrip('\n').rstrip("\r")
                #print("s==",s)
                wordtoix[s]=counter
                ixtoword[counter]=s
                counter+=1
                s=f.readline()
    def shift_id(x):
        # Identity today; hook for remapping numeric token ids if needed.
        return x
    src, tgt = [], []
    print "Start reading src file . . ."
    with codecs.open(src_f,"r",'utf-8') as f:
        line = f.readline().rstrip("\n").rstrip("\r")
        count, max_l = 0, 0
        #max_length_fact=0
        while line and (not max_num or count<max_num):
            count+=1
            if is_text_src:
                tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
            else:
                tokens=[shift_id(int(x)) for x in line.split()]
            max_l = max(max_l, len(tokens))
            if not rev_src: # reverse source
                src.append(tokens)
            else :
                src.append(tokens[::-1])
            #pdb.set_trace()
            line = f.readline().rstrip("\n").rstrip("\r")
            if np.mod(count,100000)==0:
                print count
    print "Source cnt: " + str(count) + " maxLen: " + str(max_l)
    print "Start reading tgt file . . ."
    with codecs.open(tgt_f,"r",'utf-8') as f:
        line = f.readline().rstrip("\n").rstrip("\r")
        count = 0
        #max_length_fact=0
        while line and (not max_num or count<max_num):
            count+=1
            if is_text_tgt:
                tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
            else:
                tokens=[shift_id(int(x)) for x in line.split()]
            if not rev_tgt: # reverse source
                tgt.append(tokens)
            else :
                tgt.append(tokens[::-1])
            line = f.readline().rstrip("\n").rstrip("\r")
            if np.mod(count,100000)==0:
                print count
    print "Target cnt: " + str(count) + " maxLen: " + str(max_l)
    assert(len(src)==len(tgt))
    all_pairs = np.array(zip(*[tgt, src]))
    if not train_prop:
        train , val, test = all_pairs, [], []
    else:
        # NOTE(review): np.random.choice samples WITH replacement, so the
        # train split may contain duplicates and the splits are not a clean
        # partition -- confirm whether a permutation-based split was intended.
        idx = np.random.choice(len(all_pairs), int(np.floor(train_prop*len(all_pairs))))
        rem_idx = np.array(list(set(range(len(all_pairs)))-set(idx)))
        #v_idx = np.random.choice(rem_idx, int(np.floor(0.5*len(rem_idx))))
        v_idx = np.random.choice(rem_idx, len(rem_idx)-2000)
        t_idx = np.array(list(set(rem_idx)-set(v_idx)))
        #pdb.set_trace()
        train, val, test = all_pairs[idx], all_pairs[v_idx], all_pairs[t_idx]
    if from_p:
        with open(p_f, 'wb') as pfile:
            cPickle.dump([train, val, test, wordtoix, ixtoword], pfile)
    #print(counter)
    #pdb.set_trace()
    return train, val, test, wordtoix, ixtoword
def read_test(test_file, wordtoix):
print "Start reading test file . . ."
test = []
with codecs.open(test_file,"r",'utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip("\n").rstrip("\r").split('\t')
conv = []
for l in line:
sent=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in l.split()] + [2]
conv.append(sent)
# bp()
test.append(conv)
return test
def tensors_key_in_file(file_name):
"""Return tensors key in a checkpoint file.
Args:
file_name: Name of the checkpoint file.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
return reader.get_variable_to_shape_map()
except Exception as e: # pylint: disable=broad-except
print(str(e))
return None
def get_minibatches_idx(n, minibatch_size, shuffle=False):
idx_list = np.arange(n, dtype="int32")
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
# if (minibatch_start != n):
# # Make a minibatch out of what is left
# minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# def normalizing_L1(x, axis):
# norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
# normalized = x / (norm)
# return normalized
def normalizing(x, axis):
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
normalized = x / (norm)
return normalized
def normalizing_sum(x, axis):
# sum(x) == 1
sum_prob = tf.reduce_sum(x, axis=axis, keep_dims=True)
normalized = x / sum_prob
return normalized
def _p(pp, name):
return '%s_%s' % (pp, name)
def dropout(X, trng, p=0.):
if p != 0:
retain_prob = 1 - p
X = X / retain_prob * trng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
return X
""" used for initialization of the parameters. """
def ortho_weight(ndim):
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype(config.floatX)
def uniform_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype(config.floatX)
def normal_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.randn(nin, nout) * scale
return W.astype(config.floatX)
def zero_bias(ndim):
b = np.zeros((ndim,))
return b.astype(config.floatX)
"""auxiliary function for KDE"""
def log_mean_exp(A,b,sigma):
a=-0.5*((A-theano.tensor.tile(b,[A.shape[0],1]))**2).sum(1)/(sigma**2)
max_=a.max()
return max_+theano.tensor.log(theano.tensor.exp(a-theano.tensor.tile(max_,a.shape[0])).mean())
'''calculate KDE'''
def cal_nkde(X,mu,sigma):
s1,updates=theano.scan(lambda i,s: s+log_mean_exp(mu,X[i,:],sigma), sequences=[theano.tensor.arange(X.shape[0])],outputs_info=[np.asarray(0.,dtype="float32")])
E=s1[-1]
Z=mu.shape[0]*theano.tensor.log(sigma*np.sqrt(np.pi*2))
return (Z-E)/mu.shape[0]
def cal_relevance(generated, reference, embedding): # embedding V* E
generated = [[g] for g in generated]
reference = [[s] for s in reference]
#bp()
relevance_score = [0.0,0.0,0.0]
relevance_score[0] = greedy_match(reference, generated, embedding)
relevance_score[1] = average_score(reference, generated, embedding)
relevance_score[2] = extrema_score(reference, generated, embedding)
return relevance_score
def cal_BLEU(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2],score[1:]):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
return BLEUscore
def cal_BLEU_4(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2,3],score):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
BLEUscore[3] = BLEUscore[3]/len(generated)
return BLEUscore
def cal_BLEU_4_nltk(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
reference = [[s] for s in reference]
#bp()
chencherry = SmoothingFunction()
# Note: please keep smoothing turned on, because there is a bug in NLTK without smoothing (see below).
if is_corpus:
return nltk.translate.bleu_score.corpus_bleu(reference, generated, smoothing_function=chencherry.method2) # smoothing options: 0-7
else:
return np.mean([nltk.translate.bleu_score.sentence_bleu(r, g, smoothing_function=chencherry.method2) for r,g in zip(reference, generated)]) # smoothing options: 0-7
def cal_entropy(generated):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
etp_score = [0.0,0.0,0.0,0.0]
div_score = [0.0,0.0,0.0,0.0]
counter = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)]
for gg in generated:
g = gg.rstrip('2').split()
for n in range(4):
for idx in range(len(g)-n):
ngram = ' '.join(g[idx:idx+n+1])
counter[n][ngram] += 1
for n in range(4):
total = sum(counter[n].values()) +1e-10
for v in counter[n].values():
etp_score[n] += - (v+0.0) /total * (np.log(v+0.0) - np.log(total))
div_score[n] = (len(counter[n].values())+0.0) /total
return etp_score, div_score
def prepare_for_bleu(sentence):
sent=[x for x in sentence if x!=0]
while len(sent)<4:
sent.append(0)
#sent = ' '.join([ixtoword[x] for x in sent])
sent = ' '.join([str(x) for x in sent])
return sent
def _clip_gradients_seperate_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients = [clip_ops.clip_by_norm(grad, clip_gradients) for grad in gradients]
return list(zip(clipped_gradients, variables))
def binary_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with ops.name_scope("BinaryRound") as name:
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x, name=name)
@tf.RegisterGradient("CustomGrad")
def _const_mul_grad(unused_op, grad):
return grad/1e4
def one_hot_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with g.gradient_override_map({"Log": "Identity"}):
x = tf.log(x)
x = 1e4 * x
with g.gradient_override_map({"Identity": "CustomGrad"}):
x = tf.identity(x, name="Identity")
with g.gradient_override_map({"Softmax": "Identity"}):
x = tf.nn.softmax(x)
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x) # B L V
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def reshaping(x, opt, gen_turn = None):
if gen_turn==None: gen_turn = opt.num_turn-opt.n_context
x = np.array(x)
dim = x.shape
x = np.reshape(x, [dim[0]/opt.batch_size/(gen_turn), (gen_turn), opt.batch_size, -1])
x = np.transpose(x, (0,2,1,3))
return np.squeeze(x.reshape([dim[0],-1]))
| sent2idx | identifier_name |
utils.py | import numpy as np
import tensorflow as tf
from collections import OrderedDict
import nltk
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from tensorflow.python import pywrap_tensorflow
from pdb import set_trace as bp
import data_utils as dp
import data_utils
import sys, os
from tensorflow.python.ops import clip_ops
from tensorflow.python.framework import ops
from collections import defaultdict
import codecs
import cPickle
from tensorflow.python.ops import math_ops, variable_scope
from embedding_metrics import greedy_match, extrema_score, average_score
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * tf.abs(x)
def sent2idx(text, wordtoix, opt, is_cnn = True):
sent = [wordtoix[x] for x in text.split()]
return prepare_data_for_cnn([sent for i in range(opt.batch_size)], opt)
def prepare_data_for_cnn(seqs_x, opt):
maxlen=opt.maxlen
filter_h=opt.filter_shape
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
new_lengths_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
if l_x < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
else:
new_seqs_x.append(s_x[l_x-maxlen+1:])
new_lengths_x.append(maxlen-1)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
if len(lengths_x) < 1 :
return None, None
pad = filter_h -1
x = []
for rev in seqs_x:
xx = []
for i in xrange(pad):
xx.append(0)
for idx in rev:
xx.append(idx)
while len(xx) < maxlen + 2*pad:
xx.append(0)
x.append(xx)
x = np.array(x,dtype='int32')
return x
def prepare_data_for_rnn(seqs_x, opt, is_add_GO = True):
maxlen=opt.sent_len -2 #+ opt.filter_shape - 1 # 49
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
|
seqs_x = new_seqs_x
lengths_x = [len(s) for s in seqs_x]
if len(lengths_x) < 1 :
return None, None
n_samples = len(seqs_x)
maxlen_x = np.max(lengths_x)
x = np.zeros(( n_samples, opt.sent_len)).astype('int32')
for idx, s_x in enumerate(seqs_x):
if is_add_GO:
x[idx, 0] = 1 # GO symbol
x[idx, 1:lengths_x[idx]+1] = s_x
else:
x[idx, :lengths_x[idx]] = s_x
return x
def restore_from_save(t_vars, sess, opt, prefix = 'd_', load_path = None):
if not load_path:
load_path = opt.load_path
if opt.load_from_pretrain:
save_keys = tensors_key_in_file(load_path)
#print(save_keys.keys())
ss = set([var.name[2:][:-2] for var in t_vars])&set([s[2:] for s in save_keys.keys()])
cc = {var.name[2:][:-2]:var for var in t_vars}
ss_right_shape = set([s for s in ss if cc[s].get_shape() == save_keys[prefix+s]]) # only restore variables with correct shape
ss_wrong_shape = ss - ss_right_shape
cc2 = {prefix+ var.name[2:][:-2]:var for var in t_vars if var.name[2:][:-2] in ss_right_shape} # name in file -> var
loader = tf.train.Saver(var_list=cc2)
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
else:
save_keys = tensors_key_in_file(load_path)
ss = [var for var in t_vars if var.name[:-2] in save_keys.keys()]
ss_right_shape = [var.name for var in ss if var.get_shape() == save_keys[var.name[:-2]]]
ss_wrong_shape = set([v.name for v in ss]) - set(ss_right_shape)
#ss = [var for var in ss if 'OptimizeLoss' not in var]
loader = tf.train.Saver(var_list= [var for var in t_vars if var.name in ss_right_shape])
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
_buckets = [(60,60)]
def read_data(source_path, target_path, opt):
"""
From tensorflow tutorial translate.py
Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not opt.max_train_data_size or counter < opt.max_train_data_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if opt.minlen <len(source_ids) < min(source_size, opt.maxlen) and opt.minlen <len(target_ids) < min(target_size, opt.maxlen):
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def read_pair_data_full(src_f, tgt_f, dic_f, train_prop = 0.9, max_num=None, rev_src=False, rev_tgt = False, is_text_src = False, is_text_tgt = False, p_f = '../data/', from_p = True):
#train, val = [], []
if from_p:
p_f = src_f[:-3] + str(max_num) + '.p'
if os.path.exists(p_f):
with open(p_f, 'rb') as pfile:
train, val, test, wordtoix, ixtoword = cPickle.load(pfile)
return train, val, test, wordtoix, ixtoword
wordtoix, ixtoword = {}, {}
print "Start reading dic file . . ."
if os.path.exists(dic_f):
print("loading Dictionary")
counter=0
with codecs.open(dic_f,"r",'utf-8') as f:
s=f.readline()
while s:
s=s.rstrip('\n').rstrip("\r")
#print("s==",s)
wordtoix[s]=counter
ixtoword[counter]=s
counter+=1
s=f.readline()
def shift_id(x):
return x
src, tgt = [], []
print "Start reading src file . . ."
with codecs.open(src_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count, max_l = 0, 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_src:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
max_l = max(max_l, len(tokens))
if not rev_src: # reverse source
src.append(tokens)
else :
src.append(tokens[::-1])
#pdb.set_trace()
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Source cnt: " + str(count) + " maxLen: " + str(max_l)
print "Start reading tgt file . . ."
with codecs.open(tgt_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count = 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_tgt:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
if not rev_tgt: # reverse source
tgt.append(tokens)
else :
tgt.append(tokens[::-1])
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Target cnt: " + str(count) + " maxLen: " + str(max_l)
assert(len(src)==len(tgt))
all_pairs = np.array(zip(*[tgt, src]))
if not train_prop:
train , val, test = all_pairs, [], []
else:
idx = np.random.choice(len(all_pairs), int(np.floor(train_prop*len(all_pairs))))
rem_idx = np.array(list(set(range(len(all_pairs)))-set(idx)))
#v_idx = np.random.choice(rem_idx, int(np.floor(0.5*len(rem_idx))))
v_idx = np.random.choice(rem_idx, len(rem_idx)-2000)
t_idx = np.array(list(set(rem_idx)-set(v_idx)))
#pdb.set_trace()
train, val, test = all_pairs[idx], all_pairs[v_idx], all_pairs[t_idx]
if from_p:
with open(p_f, 'wb') as pfile:
cPickle.dump([train, val, test, wordtoix, ixtoword], pfile)
#print(counter)
#pdb.set_trace()
return train, val, test, wordtoix, ixtoword
def read_test(test_file, wordtoix):
print "Start reading test file . . ."
test = []
with codecs.open(test_file,"r",'utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip("\n").rstrip("\r").split('\t')
conv = []
for l in line:
sent=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in l.split()] + [2]
conv.append(sent)
# bp()
test.append(conv)
return test
def tensors_key_in_file(file_name):
"""Return tensors key in a checkpoint file.
Args:
file_name: Name of the checkpoint file.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
return reader.get_variable_to_shape_map()
except Exception as e: # pylint: disable=broad-except
print(str(e))
return None
def get_minibatches_idx(n, minibatch_size, shuffle=False):
idx_list = np.arange(n, dtype="int32")
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
# if (minibatch_start != n):
# # Make a minibatch out of what is left
# minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# def normalizing_L1(x, axis):
# norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
# normalized = x / (norm)
# return normalized
def normalizing(x, axis):
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
normalized = x / (norm)
return normalized
def normalizing_sum(x, axis):
# sum(x) == 1
sum_prob = tf.reduce_sum(x, axis=axis, keep_dims=True)
normalized = x / sum_prob
return normalized
def _p(pp, name):
return '%s_%s' % (pp, name)
def dropout(X, trng, p=0.):
if p != 0:
retain_prob = 1 - p
X = X / retain_prob * trng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
return X
""" used for initialization of the parameters. """
def ortho_weight(ndim):
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype(config.floatX)
def uniform_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype(config.floatX)
def normal_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.randn(nin, nout) * scale
return W.astype(config.floatX)
def zero_bias(ndim):
b = np.zeros((ndim,))
return b.astype(config.floatX)
"""auxiliary function for KDE"""
def log_mean_exp(A,b,sigma):
a=-0.5*((A-theano.tensor.tile(b,[A.shape[0],1]))**2).sum(1)/(sigma**2)
max_=a.max()
return max_+theano.tensor.log(theano.tensor.exp(a-theano.tensor.tile(max_,a.shape[0])).mean())
'''calculate KDE'''
def cal_nkde(X,mu,sigma):
s1,updates=theano.scan(lambda i,s: s+log_mean_exp(mu,X[i,:],sigma), sequences=[theano.tensor.arange(X.shape[0])],outputs_info=[np.asarray(0.,dtype="float32")])
E=s1[-1]
Z=mu.shape[0]*theano.tensor.log(sigma*np.sqrt(np.pi*2))
return (Z-E)/mu.shape[0]
def cal_relevance(generated, reference, embedding): # embedding V* E
generated = [[g] for g in generated]
reference = [[s] for s in reference]
#bp()
relevance_score = [0.0,0.0,0.0]
relevance_score[0] = greedy_match(reference, generated, embedding)
relevance_score[1] = average_score(reference, generated, embedding)
relevance_score[2] = extrema_score(reference, generated, embedding)
return relevance_score
def cal_BLEU(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2],score[1:]):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
return BLEUscore
def cal_BLEU_4(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2,3],score):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
BLEUscore[3] = BLEUscore[3]/len(generated)
return BLEUscore
def cal_BLEU_4_nltk(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
reference = [[s] for s in reference]
#bp()
chencherry = SmoothingFunction()
# Note: please keep smoothing turned on, because there is a bug in NLTK without smoothing (see below).
if is_corpus:
return nltk.translate.bleu_score.corpus_bleu(reference, generated, smoothing_function=chencherry.method2) # smoothing options: 0-7
else:
return np.mean([nltk.translate.bleu_score.sentence_bleu(r, g, smoothing_function=chencherry.method2) for r,g in zip(reference, generated)]) # smoothing options: 0-7
def cal_entropy(generated):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
etp_score = [0.0,0.0,0.0,0.0]
div_score = [0.0,0.0,0.0,0.0]
counter = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)]
for gg in generated:
g = gg.rstrip('2').split()
for n in range(4):
for idx in range(len(g)-n):
ngram = ' '.join(g[idx:idx+n+1])
counter[n][ngram] += 1
for n in range(4):
total = sum(counter[n].values()) +1e-10
for v in counter[n].values():
etp_score[n] += - (v+0.0) /total * (np.log(v+0.0) - np.log(total))
div_score[n] = (len(counter[n].values())+0.0) /total
return etp_score, div_score
def prepare_for_bleu(sentence):
sent=[x for x in sentence if x!=0]
while len(sent)<4:
sent.append(0)
#sent = ' '.join([ixtoword[x] for x in sent])
sent = ' '.join([str(x) for x in sent])
return sent
def _clip_gradients_seperate_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients = [clip_ops.clip_by_norm(grad, clip_gradients) for grad in gradients]
return list(zip(clipped_gradients, variables))
def binary_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with ops.name_scope("BinaryRound") as name:
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x, name=name)
@tf.RegisterGradient("CustomGrad")
def _const_mul_grad(unused_op, grad):
return grad/1e4
def one_hot_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with g.gradient_override_map({"Log": "Identity"}):
x = tf.log(x)
x = 1e4 * x
with g.gradient_override_map({"Identity": "CustomGrad"}):
x = tf.identity(x, name="Identity")
with g.gradient_override_map({"Softmax": "Identity"}):
x = tf.nn.softmax(x)
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x) # B L V
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def reshaping(x, opt, gen_turn = None):
if gen_turn==None: gen_turn = opt.num_turn-opt.n_context
x = np.array(x)
dim = x.shape
x = np.reshape(x, [dim[0]/opt.batch_size/(gen_turn), (gen_turn), opt.batch_size, -1])
x = np.transpose(x, (0,2,1,3))
return np.squeeze(x.reshape([dim[0],-1]))
| if l_x < maxlen-2:
new_seqs_x.append(s_x)
else:
#new_seqs_x.append(s_x[l_x-maxlen+1:])
new_seqs_x.append(s_x[:maxlen-2]+[2]) | conditional_block |
utils.py | import numpy as np
import tensorflow as tf
from collections import OrderedDict
import nltk
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from tensorflow.python import pywrap_tensorflow
from pdb import set_trace as bp
import data_utils as dp
import data_utils
import sys, os
from tensorflow.python.ops import clip_ops
from tensorflow.python.framework import ops
from collections import defaultdict
import codecs
import cPickle
from tensorflow.python.ops import math_ops, variable_scope
from embedding_metrics import greedy_match, extrema_score, average_score
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * tf.abs(x)
def sent2idx(text, wordtoix, opt, is_cnn = True):
sent = [wordtoix[x] for x in text.split()]
return prepare_data_for_cnn([sent for i in range(opt.batch_size)], opt)
def prepare_data_for_cnn(seqs_x, opt):
maxlen=opt.maxlen
filter_h=opt.filter_shape
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
new_lengths_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
if l_x < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
else:
new_seqs_x.append(s_x[l_x-maxlen+1:])
new_lengths_x.append(maxlen-1)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
if len(lengths_x) < 1 :
return None, None
pad = filter_h -1
x = []
for rev in seqs_x:
xx = []
for i in xrange(pad):
xx.append(0)
for idx in rev:
xx.append(idx)
while len(xx) < maxlen + 2*pad:
xx.append(0)
x.append(xx)
x = np.array(x,dtype='int32')
return x
def prepare_data_for_rnn(seqs_x, opt, is_add_GO = True):
maxlen=opt.sent_len -2 #+ opt.filter_shape - 1 # 49
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
if l_x < maxlen-2:
new_seqs_x.append(s_x)
else:
#new_seqs_x.append(s_x[l_x-maxlen+1:])
new_seqs_x.append(s_x[:maxlen-2]+[2])
seqs_x = new_seqs_x
lengths_x = [len(s) for s in seqs_x]
if len(lengths_x) < 1 :
return None, None
n_samples = len(seqs_x)
maxlen_x = np.max(lengths_x)
x = np.zeros(( n_samples, opt.sent_len)).astype('int32')
for idx, s_x in enumerate(seqs_x):
if is_add_GO:
x[idx, 0] = 1 # GO symbol
x[idx, 1:lengths_x[idx]+1] = s_x
else:
x[idx, :lengths_x[idx]] = s_x
return x
def restore_from_save(t_vars, sess, opt, prefix = 'd_', load_path = None):
if not load_path:
load_path = opt.load_path
if opt.load_from_pretrain:
save_keys = tensors_key_in_file(load_path)
#print(save_keys.keys())
ss = set([var.name[2:][:-2] for var in t_vars])&set([s[2:] for s in save_keys.keys()])
cc = {var.name[2:][:-2]:var for var in t_vars}
ss_right_shape = set([s for s in ss if cc[s].get_shape() == save_keys[prefix+s]]) # only restore variables with correct shape
ss_wrong_shape = ss - ss_right_shape
cc2 = {prefix+ var.name[2:][:-2]:var for var in t_vars if var.name[2:][:-2] in ss_right_shape} # name in file -> var
loader = tf.train.Saver(var_list=cc2)
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
else:
save_keys = tensors_key_in_file(load_path)
ss = [var for var in t_vars if var.name[:-2] in save_keys.keys()]
ss_right_shape = [var.name for var in ss if var.get_shape() == save_keys[var.name[:-2]]]
ss_wrong_shape = set([v.name for v in ss]) - set(ss_right_shape)
#ss = [var for var in ss if 'OptimizeLoss' not in var]
loader = tf.train.Saver(var_list= [var for var in t_vars if var.name in ss_right_shape])
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
_buckets = [(60,60)]
def read_data(source_path, target_path, opt):
"""
From tensorflow tutorial translate.py
Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not opt.max_train_data_size or counter < opt.max_train_data_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if opt.minlen <len(source_ids) < min(source_size, opt.maxlen) and opt.minlen <len(target_ids) < min(target_size, opt.maxlen):
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def read_pair_data_full(src_f, tgt_f, dic_f, train_prop = 0.9, max_num=None, rev_src=False, rev_tgt = False, is_text_src = False, is_text_tgt = False, p_f = '../data/', from_p = True):
#train, val = [], []
if from_p:
p_f = src_f[:-3] + str(max_num) + '.p'
if os.path.exists(p_f):
with open(p_f, 'rb') as pfile:
train, val, test, wordtoix, ixtoword = cPickle.load(pfile)
return train, val, test, wordtoix, ixtoword
wordtoix, ixtoword = {}, {}
print "Start reading dic file . . ."
if os.path.exists(dic_f):
print("loading Dictionary")
counter=0
with codecs.open(dic_f,"r",'utf-8') as f:
s=f.readline()
while s:
s=s.rstrip('\n').rstrip("\r")
#print("s==",s)
wordtoix[s]=counter
ixtoword[counter]=s
counter+=1
s=f.readline()
def shift_id(x):
return x
src, tgt = [], []
print "Start reading src file . . ."
with codecs.open(src_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count, max_l = 0, 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_src:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
max_l = max(max_l, len(tokens))
if not rev_src: # reverse source
src.append(tokens)
else :
src.append(tokens[::-1])
#pdb.set_trace()
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Source cnt: " + str(count) + " maxLen: " + str(max_l)
print "Start reading tgt file . . ."
with codecs.open(tgt_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count = 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_tgt:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
if not rev_tgt: # reverse source
tgt.append(tokens)
else :
tgt.append(tokens[::-1])
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Target cnt: " + str(count) + " maxLen: " + str(max_l)
assert(len(src)==len(tgt))
all_pairs = np.array(zip(*[tgt, src]))
if not train_prop:
train , val, test = all_pairs, [], []
else:
idx = np.random.choice(len(all_pairs), int(np.floor(train_prop*len(all_pairs))))
rem_idx = np.array(list(set(range(len(all_pairs)))-set(idx)))
#v_idx = np.random.choice(rem_idx, int(np.floor(0.5*len(rem_idx))))
v_idx = np.random.choice(rem_idx, len(rem_idx)-2000)
t_idx = np.array(list(set(rem_idx)-set(v_idx)))
#pdb.set_trace()
train, val, test = all_pairs[idx], all_pairs[v_idx], all_pairs[t_idx]
if from_p:
with open(p_f, 'wb') as pfile:
cPickle.dump([train, val, test, wordtoix, ixtoword], pfile)
#print(counter)
#pdb.set_trace()
return train, val, test, wordtoix, ixtoword
def read_test(test_file, wordtoix):
print "Start reading test file . . ."
test = []
with codecs.open(test_file,"r",'utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip("\n").rstrip("\r").split('\t')
conv = []
for l in line:
sent=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in l.split()] + [2]
conv.append(sent)
# bp()
test.append(conv)
return test
def tensors_key_in_file(file_name):
"""Return tensors key in a checkpoint file.
Args:
file_name: Name of the checkpoint file.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
return reader.get_variable_to_shape_map()
except Exception as e: # pylint: disable=broad-except
print(str(e))
return None
def get_minibatches_idx(n, minibatch_size, shuffle=False):
idx_list = np.arange(n, dtype="int32")
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
# if (minibatch_start != n):
# # Make a minibatch out of what is left
# minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# def normalizing_L1(x, axis):
# norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
# normalized = x / (norm)
# return normalized
def normalizing(x, axis):
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
normalized = x / (norm)
return normalized
def normalizing_sum(x, axis):
# sum(x) == 1
sum_prob = tf.reduce_sum(x, axis=axis, keep_dims=True)
normalized = x / sum_prob
return normalized
def _p(pp, name):
return '%s_%s' % (pp, name)
def dropout(X, trng, p=0.):
if p != 0:
retain_prob = 1 - p
X = X / retain_prob * trng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
return X
""" used for initialization of the parameters. """
def ortho_weight(ndim):
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype(config.floatX)
def uniform_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype(config.floatX)
def normal_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.randn(nin, nout) * scale
return W.astype(config.floatX)
def zero_bias(ndim):
b = np.zeros((ndim,))
return b.astype(config.floatX)
"""auxiliary function for KDE"""
def log_mean_exp(A,b,sigma):
a=-0.5*((A-theano.tensor.tile(b,[A.shape[0],1]))**2).sum(1)/(sigma**2)
max_=a.max()
return max_+theano.tensor.log(theano.tensor.exp(a-theano.tensor.tile(max_,a.shape[0])).mean())
'''calculate KDE'''
def cal_nkde(X,mu,sigma):
s1,updates=theano.scan(lambda i,s: s+log_mean_exp(mu,X[i,:],sigma), sequences=[theano.tensor.arange(X.shape[0])],outputs_info=[np.asarray(0.,dtype="float32")])
E=s1[-1]
Z=mu.shape[0]*theano.tensor.log(sigma*np.sqrt(np.pi*2))
return (Z-E)/mu.shape[0]
def cal_relevance(generated, reference, embedding): # embedding V* E
generated = [[g] for g in generated]
reference = [[s] for s in reference]
#bp()
relevance_score = [0.0,0.0,0.0]
relevance_score[0] = greedy_match(reference, generated, embedding)
relevance_score[1] = average_score(reference, generated, embedding)
relevance_score[2] = extrema_score(reference, generated, embedding)
return relevance_score
def cal_BLEU(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2],score[1:]):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
return BLEUscore
def cal_BLEU_4(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2,3],score):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
BLEUscore[3] = BLEUscore[3]/len(generated)
return BLEUscore
def cal_BLEU_4_nltk(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
reference = [[s] for s in reference]
#bp()
chencherry = SmoothingFunction()
# Note: please keep smoothing turned on, because there is a bug in NLTK without smoothing (see below).
if is_corpus:
return nltk.translate.bleu_score.corpus_bleu(reference, generated, smoothing_function=chencherry.method2) # smoothing options: 0-7
else:
return np.mean([nltk.translate.bleu_score.sentence_bleu(r, g, smoothing_function=chencherry.method2) for r,g in zip(reference, generated)]) # smoothing options: 0-7
def cal_entropy(generated):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
etp_score = [0.0,0.0,0.0,0.0]
div_score = [0.0,0.0,0.0,0.0]
counter = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)]
for gg in generated:
g = gg.rstrip('2').split()
for n in range(4):
for idx in range(len(g)-n):
ngram = ' '.join(g[idx:idx+n+1])
counter[n][ngram] += 1
for n in range(4):
total = sum(counter[n].values()) +1e-10
for v in counter[n].values():
etp_score[n] += - (v+0.0) /total * (np.log(v+0.0) - np.log(total))
div_score[n] = (len(counter[n].values())+0.0) /total
return etp_score, div_score
def prepare_for_bleu(sentence):
sent=[x for x in sentence if x!=0]
while len(sent)<4:
sent.append(0)
#sent = ' '.join([ixtoword[x] for x in sent])
sent = ' '.join([str(x) for x in sent])
return sent
def _clip_gradients_seperate_norm(grads_and_vars, clip_gradients):
|
def binary_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with ops.name_scope("BinaryRound") as name:
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x, name=name)
@tf.RegisterGradient("CustomGrad")
def _const_mul_grad(unused_op, grad):
return grad/1e4
def one_hot_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with g.gradient_override_map({"Log": "Identity"}):
x = tf.log(x)
x = 1e4 * x
with g.gradient_override_map({"Identity": "CustomGrad"}):
x = tf.identity(x, name="Identity")
with g.gradient_override_map({"Softmax": "Identity"}):
x = tf.nn.softmax(x)
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x) # B L V
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def reshaping(x, opt, gen_turn = None):
if gen_turn==None: gen_turn = opt.num_turn-opt.n_context
x = np.array(x)
dim = x.shape
x = np.reshape(x, [dim[0]/opt.batch_size/(gen_turn), (gen_turn), opt.batch_size, -1])
x = np.transpose(x, (0,2,1,3))
return np.squeeze(x.reshape([dim[0],-1]))
| """Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients = [clip_ops.clip_by_norm(grad, clip_gradients) for grad in gradients]
return list(zip(clipped_gradients, variables)) | identifier_body |
utils.py | import numpy as np
import tensorflow as tf
from collections import OrderedDict
import nltk
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from tensorflow.python import pywrap_tensorflow
from pdb import set_trace as bp
import data_utils as dp
import data_utils
import sys, os
from tensorflow.python.ops import clip_ops
from tensorflow.python.framework import ops
from collections import defaultdict
import codecs
import cPickle
from tensorflow.python.ops import math_ops, variable_scope
from embedding_metrics import greedy_match, extrema_score, average_score
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * tf.abs(x)
def sent2idx(text, wordtoix, opt, is_cnn = True):
sent = [wordtoix[x] for x in text.split()]
return prepare_data_for_cnn([sent for i in range(opt.batch_size)], opt)
def prepare_data_for_cnn(seqs_x, opt):
maxlen=opt.maxlen
filter_h=opt.filter_shape
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
new_lengths_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
if l_x < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
else:
new_seqs_x.append(s_x[l_x-maxlen+1:])
new_lengths_x.append(maxlen-1)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
if len(lengths_x) < 1 :
return None, None
pad = filter_h -1
x = []
for rev in seqs_x:
xx = []
for i in xrange(pad):
xx.append(0)
for idx in rev:
xx.append(idx)
while len(xx) < maxlen + 2*pad:
xx.append(0)
x.append(xx)
x = np.array(x,dtype='int32')
return x
def prepare_data_for_rnn(seqs_x, opt, is_add_GO = True):
maxlen=opt.sent_len -2 #+ opt.filter_shape - 1 # 49
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
if l_x < maxlen-2:
new_seqs_x.append(s_x)
else:
#new_seqs_x.append(s_x[l_x-maxlen+1:])
new_seqs_x.append(s_x[:maxlen-2]+[2])
seqs_x = new_seqs_x
lengths_x = [len(s) for s in seqs_x]
if len(lengths_x) < 1 :
return None, None
n_samples = len(seqs_x)
maxlen_x = np.max(lengths_x)
x = np.zeros(( n_samples, opt.sent_len)).astype('int32')
for idx, s_x in enumerate(seqs_x):
if is_add_GO:
x[idx, 0] = 1 # GO symbol
x[idx, 1:lengths_x[idx]+1] = s_x
else:
x[idx, :lengths_x[idx]] = s_x
return x
def restore_from_save(t_vars, sess, opt, prefix = 'd_', load_path = None):
if not load_path:
load_path = opt.load_path
if opt.load_from_pretrain:
save_keys = tensors_key_in_file(load_path)
#print(save_keys.keys())
ss = set([var.name[2:][:-2] for var in t_vars])&set([s[2:] for s in save_keys.keys()])
cc = {var.name[2:][:-2]:var for var in t_vars}
ss_right_shape = set([s for s in ss if cc[s].get_shape() == save_keys[prefix+s]]) # only restore variables with correct shape
ss_wrong_shape = ss - ss_right_shape
cc2 = {prefix+ var.name[2:][:-2]:var for var in t_vars if var.name[2:][:-2] in ss_right_shape} # name in file -> var
loader = tf.train.Saver(var_list=cc2)
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
else:
save_keys = tensors_key_in_file(load_path)
ss = [var for var in t_vars if var.name[:-2] in save_keys.keys()]
ss_right_shape = [var.name for var in ss if var.get_shape() == save_keys[var.name[:-2]]]
ss_wrong_shape = set([v.name for v in ss]) - set(ss_right_shape)
#ss = [var for var in ss if 'OptimizeLoss' not in var]
loader = tf.train.Saver(var_list= [var for var in t_vars if var.name in ss_right_shape])
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
_buckets = [(60,60)]
def read_data(source_path, target_path, opt):
"""
From tensorflow tutorial translate.py
Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not opt.max_train_data_size or counter < opt.max_train_data_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if opt.minlen <len(source_ids) < min(source_size, opt.maxlen) and opt.minlen <len(target_ids) < min(target_size, opt.maxlen):
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def read_pair_data_full(src_f, tgt_f, dic_f, train_prop = 0.9, max_num=None, rev_src=False, rev_tgt = False, is_text_src = False, is_text_tgt = False, p_f = '../data/', from_p = True):
#train, val = [], []
if from_p:
p_f = src_f[:-3] + str(max_num) + '.p'
if os.path.exists(p_f):
with open(p_f, 'rb') as pfile:
train, val, test, wordtoix, ixtoword = cPickle.load(pfile)
return train, val, test, wordtoix, ixtoword
wordtoix, ixtoword = {}, {}
print "Start reading dic file . . ."
if os.path.exists(dic_f):
print("loading Dictionary")
counter=0
with codecs.open(dic_f,"r",'utf-8') as f:
s=f.readline()
while s:
s=s.rstrip('\n').rstrip("\r")
#print("s==",s)
wordtoix[s]=counter
ixtoword[counter]=s
counter+=1
s=f.readline()
def shift_id(x):
return x
src, tgt = [], []
print "Start reading src file . . ."
with codecs.open(src_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count, max_l = 0, 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_src:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
max_l = max(max_l, len(tokens))
if not rev_src: # reverse source
src.append(tokens)
else :
src.append(tokens[::-1])
#pdb.set_trace()
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Source cnt: " + str(count) + " maxLen: " + str(max_l)
print "Start reading tgt file . . ."
with codecs.open(tgt_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count = 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_tgt:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
if not rev_tgt: # reverse source
tgt.append(tokens)
else :
tgt.append(tokens[::-1])
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Target cnt: " + str(count) + " maxLen: " + str(max_l)
assert(len(src)==len(tgt))
all_pairs = np.array(zip(*[tgt, src]))
if not train_prop:
train , val, test = all_pairs, [], []
else:
idx = np.random.choice(len(all_pairs), int(np.floor(train_prop*len(all_pairs))))
rem_idx = np.array(list(set(range(len(all_pairs)))-set(idx)))
#v_idx = np.random.choice(rem_idx, int(np.floor(0.5*len(rem_idx))))
v_idx = np.random.choice(rem_idx, len(rem_idx)-2000)
t_idx = np.array(list(set(rem_idx)-set(v_idx)))
#pdb.set_trace()
train, val, test = all_pairs[idx], all_pairs[v_idx], all_pairs[t_idx]
if from_p:
with open(p_f, 'wb') as pfile:
cPickle.dump([train, val, test, wordtoix, ixtoword], pfile)
#print(counter)
#pdb.set_trace()
return train, val, test, wordtoix, ixtoword
def read_test(test_file, wordtoix):
print "Start reading test file . . ."
test = []
with codecs.open(test_file,"r",'utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip("\n").rstrip("\r").split('\t')
conv = []
for l in line:
sent=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in l.split()] + [2]
conv.append(sent)
# bp()
test.append(conv)
return test
def tensors_key_in_file(file_name):
"""Return tensors key in a checkpoint file.
Args:
file_name: Name of the checkpoint file.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
return reader.get_variable_to_shape_map()
except Exception as e: # pylint: disable=broad-except
print(str(e))
return None
def get_minibatches_idx(n, minibatch_size, shuffle=False):
idx_list = np.arange(n, dtype="int32")
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
# if (minibatch_start != n):
# # Make a minibatch out of what is left
# minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# def normalizing_L1(x, axis):
# norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
# normalized = x / (norm)
# return normalized
def normalizing(x, axis):
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
normalized = x / (norm)
return normalized
def normalizing_sum(x, axis):
# sum(x) == 1
sum_prob = tf.reduce_sum(x, axis=axis, keep_dims=True)
normalized = x / sum_prob
return normalized
def _p(pp, name):
return '%s_%s' % (pp, name)
def dropout(X, trng, p=0.):
if p != 0:
retain_prob = 1 - p
X = X / retain_prob * trng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
return X
""" used for initialization of the parameters. """
def ortho_weight(ndim):
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype(config.floatX)
def uniform_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype(config.floatX)
def normal_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.randn(nin, nout) * scale
return W.astype(config.floatX)
def zero_bias(ndim):
b = np.zeros((ndim,))
return b.astype(config.floatX)
"""auxiliary function for KDE"""
def log_mean_exp(A,b,sigma):
a=-0.5*((A-theano.tensor.tile(b,[A.shape[0],1]))**2).sum(1)/(sigma**2)
max_=a.max()
return max_+theano.tensor.log(theano.tensor.exp(a-theano.tensor.tile(max_,a.shape[0])).mean())
'''calculate KDE'''
def cal_nkde(X,mu,sigma):
s1,updates=theano.scan(lambda i,s: s+log_mean_exp(mu,X[i,:],sigma), sequences=[theano.tensor.arange(X.shape[0])],outputs_info=[np.asarray(0.,dtype="float32")])
E=s1[-1]
Z=mu.shape[0]*theano.tensor.log(sigma*np.sqrt(np.pi*2))
return (Z-E)/mu.shape[0]
def cal_relevance(generated, reference, embedding): # embedding V* E
generated = [[g] for g in generated]
reference = [[s] for s in reference]
#bp()
relevance_score = [0.0,0.0,0.0]
relevance_score[0] = greedy_match(reference, generated, embedding)
relevance_score[1] = average_score(reference, generated, embedding)
relevance_score[2] = extrema_score(reference, generated, embedding)
return relevance_score
def cal_BLEU(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2],score[1:]):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
return BLEUscore
def cal_BLEU_4(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2,3],score):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
BLEUscore[3] = BLEUscore[3]/len(generated)
return BLEUscore
def cal_BLEU_4_nltk(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
reference = [[s] for s in reference]
#bp()
chencherry = SmoothingFunction()
# Note: please keep smoothing turned on, because there is a bug in NLTK without smoothing (see below).
if is_corpus:
return nltk.translate.bleu_score.corpus_bleu(reference, generated, smoothing_function=chencherry.method2) # smoothing options: 0-7
else:
return np.mean([nltk.translate.bleu_score.sentence_bleu(r, g, smoothing_function=chencherry.method2) for r,g in zip(reference, generated)]) # smoothing options: 0-7
def cal_entropy(generated):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
etp_score = [0.0,0.0,0.0,0.0]
div_score = [0.0,0.0,0.0,0.0]
counter = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)]
for gg in generated:
g = gg.rstrip('2').split()
for n in range(4):
for idx in range(len(g)-n):
ngram = ' '.join(g[idx:idx+n+1])
counter[n][ngram] += 1
for n in range(4):
total = sum(counter[n].values()) +1e-10
for v in counter[n].values():
etp_score[n] += - (v+0.0) /total * (np.log(v+0.0) - np.log(total))
div_score[n] = (len(counter[n].values())+0.0) /total
return etp_score, div_score
def prepare_for_bleu(sentence):
sent=[x for x in sentence if x!=0]
while len(sent)<4:
sent.append(0)
#sent = ' '.join([ixtoword[x] for x in sent]) | def _clip_gradients_seperate_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients = [clip_ops.clip_by_norm(grad, clip_gradients) for grad in gradients]
return list(zip(clipped_gradients, variables))
def binary_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with ops.name_scope("BinaryRound") as name:
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x, name=name)
@tf.RegisterGradient("CustomGrad")
def _const_mul_grad(unused_op, grad):
return grad/1e4
def one_hot_round(x):
"""
Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
using the straight through estimator for the gradient.
"""
g = tf.get_default_graph()
with g.gradient_override_map({"Log": "Identity"}):
x = tf.log(x)
x = 1e4 * x
with g.gradient_override_map({"Identity": "CustomGrad"}):
x = tf.identity(x, name="Identity")
with g.gradient_override_map({"Softmax": "Identity"}):
x = tf.nn.softmax(x)
with g.gradient_override_map({"Round": "Identity"}):
return tf.round(x) # B L V
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def reshaping(x, opt, gen_turn = None):
if gen_turn==None: gen_turn = opt.num_turn-opt.n_context
x = np.array(x)
dim = x.shape
x = np.reshape(x, [dim[0]/opt.batch_size/(gen_turn), (gen_turn), opt.batch_size, -1])
x = np.transpose(x, (0,2,1,3))
return np.squeeze(x.reshape([dim[0],-1])) | sent = ' '.join([str(x) for x in sent])
return sent
| random_line_split |
formatter.rs | use std::fmt;
use std::fmt::Write;
use std::iter::Iterator;
use std::str;
use std::string::String;
use types::*;
#[derive(Debug, PartialEq)]
pub struct Formatter<'a, 'b> {
pub key: &'a str,
fill: char,
align: Alignment, // default Right for numbers, Left for strings
sign: Sign,
alternate: bool,
width: Option<usize>,
thousands: bool,
precision: Option<usize>,
ty: Option<char>,
buff: &'b mut String,
pattern: &'a str,
}
fn is_alignment_token(c: char) -> bool {
match c {
'=' | '<' | '^' | '>' => true,
_ => false,
}
}
fn is_sign_element(c: char) -> bool {
match c {
' ' | '-' | '+' => true,
_ => false,
}
}
fn is_type_element(c: char) -> bool {
match c {
'b' | 'o' | 'x' | 'X' | 'e' | 'E' | 'f' | 'F' | '%' | 's' | '?' => true,
_ => false,
}
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
let (_, rest) = s.split_at(pos);
let mut consumed: usize = 0;
for b in rest {
match *b as char {
'0'..='9' => {}
_ => break,
};
consumed += 1;
}
if consumed == 0 {
(0, None)
} else {
let (intstr, _) = rest.split_at(consumed);
let val = unsafe {
// I think I can be reasonably sure that 0-9 chars are utf8 :)
match str::from_utf8_unchecked(intstr).parse::<i64>() {
Ok(v) => Some(v),
Err(_) => None,
}
};
(consumed, val)
}
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
struct FmtPy {
pub fill: char,
pub align: char,
pub alternate: bool,
pub sign: char,
pub width: i64,
pub thousands: bool,
pub precision: i64,
pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
// The rest of this was pretty much strait up copied from python's format parser
// All credit goes to python source file: formatter_unicode.c
//
let mut format = FmtPy {
fill: ' ',
align: '\0',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: '\0',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let rest = rest.as_bytes();
let mut align_specified = false;
let mut fill_specified = false;
let end: usize = rest.len();
let mut pos: usize = 0;
// If the second char is an alignment token,
// then fake_fill as fill
if end - pos >= 1 + fake_fill.len_utf8()
&& is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
{
format.align = rest[pos + fake_fill.len_utf8()] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 1 + fake_fill.len_utf8();
} else if end - pos >= 1 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos += fake_fill.len_utf8();
}
// Parse the various sign options
if end - pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos += 1;
}
// If the next character is #, we're in alternate mode. This only
// applies to integers.
if end - pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos += 1;
}
// The special case for 0-padding (backwards compat)
if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if !align_specified {
format.align = '=';
}
pos += 1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing width".to_string(),
))
}
Some(v) => {
format.width = v;
}
}
}
// Comma signifies add thousands separators
if end - pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos += 1;
}
// Parse field precision
if end - pos > 0 && rest[pos] as char == '.' {
pos += 1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing precision".to_string(),
))
}
Some(v) => {
format.precision = v;
}
}
} else {
// Not having a precision after a dot is an error.
if consumed == 0 {
return Err(FmtError::Invalid(
"Format specifier missing precision".to_string(),
));
}
}
pos += consumed;
}
// Finally, parse the type field.
if end - pos > 1 {
// More than one char remain, invalid format specifier.
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end - pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::TypeError(msg));
}
// pos+=1;
}
// Do as much validating as we can, just by looking at the format
// specifier. Do not take into account what type of formatting
// we're doing (int, float, string).
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
/// create Formatter from format string
pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
let mut found_colon = false;
let mut chars = s.chars();
let mut c = match chars.next() {
Some(':') | None => {
return Err(FmtError::Invalid("must specify identifier".to_string()))
}
Some(c) => c,
};
let mut consumed = 0;
// find the identifier
loop {
consumed += c.len_utf8();
if c == ':' {
found_colon = true;
break;
}
c = match chars.next() {
Some(c) => c,
None => {
break;
}
};
}
let (identifier, rest) = s.split_at(consumed);
let identifier = if found_colon {
let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
i
} else {
identifier
};
let format = parse_like_python(rest)?;
Ok(Formatter {
key: identifier,
fill: format.fill,
align: match format.align {
'\0' => Alignment::Unspecified,
'<' => Alignment::Left,
'^' => Alignment::Center,
'>' => Alignment::Right,
'=' => Alignment::Equal,
_ => unreachable!(),
},
sign: match format.sign {
'\0' => Sign::Unspecified,
'+' => Sign::Plus,
'-' => Sign::Minus,
' ' => Sign::Space,
_ => unreachable!(),
},
alternate: format.alternate,
width: match format.width {
-1 => None,
_ => Some(format.width as usize),
},
thousands: format.thousands,
precision: match format.precision {
-1 => None,
_ => Some(format.precision as usize),
},
ty: match format.ty {
'\0' => None,
_ => Some(format.ty),
},
buff: buff,
pattern: s,
})
}
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
/// fill getter
pub fn fill(&self) -> char {
self.fill
}
/// align getter
pub fn align(&self) -> Alignment {
self.align.clone()
}
// provide default for unspecified alignment
pub fn set_default_align(&mut self, align: Alignment) {
if self.align == Alignment::Unspecified {
self.align = align
}
}
/// width getter
pub fn width(&self) -> Option<usize> |
/// thousands getter
pub fn thousands(&self) -> bool {
self.thousands
}
/// precision getter
pub fn precision(&self) -> Option<usize> {
self.precision
}
/// set precision to None, used for formatting int, float, etc
pub fn set_precision(&mut self, precision: Option<usize>) {
self.precision = precision;
}
/// sign getter
pub fn sign(&self) -> Sign {
self.sign.clone()
}
/// sign plus getter
/// here because it is in fmt::Formatter
pub fn sign_plus(&self) -> bool {
self.sign == Sign::Plus
}
/// sign minus getter
/// here because it is in fmt::Formatter
pub fn sign_minus(&self) -> bool {
self.sign == Sign::Minus
}
/// alternate getter
pub fn alternate(&self) -> bool {
self.alternate
}
// sign_aware_zero_pad // Not supported
/// type getter
pub fn ty(&self) -> Option<char> {
self.ty
}
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting integers
pub fn is_int_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'b' | 'o' | 'x' | 'X' => true,
_ => false,
},
}
}
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting floats
pub fn is_float_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'f' | 'e' | 'E' => true,
_ => false,
},
}
}
}
impl<'a, 'b> fmt::Write for Formatter<'a, 'b> {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.buff.write_str(s)
}
}
| {
self.width
} | identifier_body |
formatter.rs | use std::fmt;
use std::fmt::Write;
use std::iter::Iterator;
use std::str;
use std::string::String;
use types::*;
#[derive(Debug, PartialEq)]
pub struct Formatter<'a, 'b> {
pub key: &'a str,
fill: char,
align: Alignment, // default Right for numbers, Left for strings
sign: Sign,
alternate: bool,
width: Option<usize>,
thousands: bool,
precision: Option<usize>,
ty: Option<char>,
buff: &'b mut String,
pattern: &'a str,
}
fn is_alignment_token(c: char) -> bool {
match c {
'=' | '<' | '^' | '>' => true,
_ => false,
}
}
fn is_sign_element(c: char) -> bool {
match c {
' ' | '-' | '+' => true,
_ => false,
}
}
fn is_type_element(c: char) -> bool {
match c {
'b' | 'o' | 'x' | 'X' | 'e' | 'E' | 'f' | 'F' | '%' | 's' | '?' => true,
_ => false,
}
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
let (_, rest) = s.split_at(pos);
let mut consumed: usize = 0;
for b in rest {
match *b as char {
'0'..='9' => {}
_ => break,
};
consumed += 1;
}
if consumed == 0 {
(0, None)
} else {
let (intstr, _) = rest.split_at(consumed);
let val = unsafe {
// I think I can be reasonably sure that 0-9 chars are utf8 :)
match str::from_utf8_unchecked(intstr).parse::<i64>() {
Ok(v) => Some(v),
Err(_) => None,
}
};
(consumed, val)
}
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
struct FmtPy {
pub fill: char,
pub align: char,
pub alternate: bool,
pub sign: char,
pub width: i64,
pub thousands: bool,
pub precision: i64,
pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
// The rest of this was pretty much strait up copied from python's format parser
// All credit goes to python source file: formatter_unicode.c
//
let mut format = FmtPy {
fill: ' ',
align: '\0',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: '\0',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let rest = rest.as_bytes();
let mut align_specified = false;
let mut fill_specified = false;
let end: usize = rest.len();
let mut pos: usize = 0;
// If the second char is an alignment token,
// then fake_fill as fill
if end - pos >= 1 + fake_fill.len_utf8()
&& is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
{
format.align = rest[pos + fake_fill.len_utf8()] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 1 + fake_fill.len_utf8();
} else if end - pos >= 1 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos += fake_fill.len_utf8();
}
// Parse the various sign options
if end - pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos += 1;
} | if end - pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos += 1;
}
// The special case for 0-padding (backwards compat)
if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if !align_specified {
format.align = '=';
}
pos += 1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing width".to_string(),
))
}
Some(v) => {
format.width = v;
}
}
}
// Comma signifies add thousands separators
if end - pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos += 1;
}
// Parse field precision
if end - pos > 0 && rest[pos] as char == '.' {
pos += 1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing precision".to_string(),
))
}
Some(v) => {
format.precision = v;
}
}
} else {
// Not having a precision after a dot is an error.
if consumed == 0 {
return Err(FmtError::Invalid(
"Format specifier missing precision".to_string(),
));
}
}
pos += consumed;
}
// Finally, parse the type field.
if end - pos > 1 {
// More than one char remain, invalid format specifier.
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end - pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::TypeError(msg));
}
// pos+=1;
}
// Do as much validating as we can, just by looking at the format
// specifier. Do not take into account what type of formatting
// we're doing (int, float, string).
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
/// create Formatter from format string
pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
let mut found_colon = false;
let mut chars = s.chars();
let mut c = match chars.next() {
Some(':') | None => {
return Err(FmtError::Invalid("must specify identifier".to_string()))
}
Some(c) => c,
};
let mut consumed = 0;
// find the identifier
loop {
consumed += c.len_utf8();
if c == ':' {
found_colon = true;
break;
}
c = match chars.next() {
Some(c) => c,
None => {
break;
}
};
}
let (identifier, rest) = s.split_at(consumed);
let identifier = if found_colon {
let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
i
} else {
identifier
};
let format = parse_like_python(rest)?;
Ok(Formatter {
key: identifier,
fill: format.fill,
align: match format.align {
'\0' => Alignment::Unspecified,
'<' => Alignment::Left,
'^' => Alignment::Center,
'>' => Alignment::Right,
'=' => Alignment::Equal,
_ => unreachable!(),
},
sign: match format.sign {
'\0' => Sign::Unspecified,
'+' => Sign::Plus,
'-' => Sign::Minus,
' ' => Sign::Space,
_ => unreachable!(),
},
alternate: format.alternate,
width: match format.width {
-1 => None,
_ => Some(format.width as usize),
},
thousands: format.thousands,
precision: match format.precision {
-1 => None,
_ => Some(format.precision as usize),
},
ty: match format.ty {
'\0' => None,
_ => Some(format.ty),
},
buff: buff,
pattern: s,
})
}
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
/// fill getter
pub fn fill(&self) -> char {
self.fill
}
/// align getter
pub fn align(&self) -> Alignment {
self.align.clone()
}
// provide default for unspecified alignment
pub fn set_default_align(&mut self, align: Alignment) {
if self.align == Alignment::Unspecified {
self.align = align
}
}
/// width getter
pub fn width(&self) -> Option<usize> {
self.width
}
/// thousands getter
pub fn thousands(&self) -> bool {
self.thousands
}
/// precision getter
pub fn precision(&self) -> Option<usize> {
self.precision
}
/// set precision to None, used for formatting int, float, etc
pub fn set_precision(&mut self, precision: Option<usize>) {
self.precision = precision;
}
/// sign getter
pub fn sign(&self) -> Sign {
self.sign.clone()
}
/// sign plus getter
/// here because it is in fmt::Formatter
pub fn sign_plus(&self) -> bool {
self.sign == Sign::Plus
}
/// sign minus getter
/// here because it is in fmt::Formatter
pub fn sign_minus(&self) -> bool {
self.sign == Sign::Minus
}
/// alternate getter
pub fn alternate(&self) -> bool {
self.alternate
}
// sign_aware_zero_pad // Not supported
/// type getter
pub fn ty(&self) -> Option<char> {
self.ty
}
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting integers
pub fn is_int_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'b' | 'o' | 'x' | 'X' => true,
_ => false,
},
}
}
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting floats
pub fn is_float_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'f' | 'e' | 'E' => true,
_ => false,
},
}
}
}
impl<'a, 'b> fmt::Write for Formatter<'a, 'b> {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.buff.write_str(s)
}
} |
// If the next character is #, we're in alternate mode. This only
// applies to integers. | random_line_split |
formatter.rs | use std::fmt;
use std::fmt::Write;
use std::iter::Iterator;
use std::str;
use std::string::String;
use types::*;
#[derive(Debug, PartialEq)]
pub struct Formatter<'a, 'b> {
pub key: &'a str,
fill: char,
align: Alignment, // default Right for numbers, Left for strings
sign: Sign,
alternate: bool,
width: Option<usize>,
thousands: bool,
precision: Option<usize>,
ty: Option<char>,
buff: &'b mut String,
pattern: &'a str,
}
/// True if `c` is one of the python-style alignment tokens
/// (`=`, `<`, `^`, `>`).
fn is_alignment_token(c: char) -> bool {
    // `matches!` replaces the manual match-to-bool boilerplate.
    matches!(c, '=' | '<' | '^' | '>')
}
/// True if `c` is a sign option in a format spec: `' '`, `'-'`, or `'+'`.
fn is_sign_element(c: char) -> bool {
    // `matches!` replaces the manual match-to-bool boilerplate.
    matches!(c, ' ' | '-' | '+')
}
/// True if `c` is a recognized format "type" character
/// (integer radixes, float styles, `%`, `s`, or debug `?`).
fn is_type_element(c: char) -> bool {
    // `matches!` replaces the manual match-to-bool boilerplate.
    matches!(
        c,
        'b' | 'o' | 'x' | 'X' | 'e' | 'E' | 'f' | 'F' | '%' | 's' | '?'
    )
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
//
// Returns `(0, None)` when no digit is found at `pos`, and
// `(consumed, None)` when the digit run overflows an i64.
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
    let digits = &s[pos..];
    // Count the leading run of ASCII digits.
    let consumed = digits.iter().take_while(|b| b.is_ascii_digit()).count();
    if consumed == 0 {
        return (0, None);
    }
    // The counted bytes are all ASCII digits, so UTF-8 validation cannot
    // fail here; this replaces the previous `from_utf8_unchecked` and
    // drops the `unsafe` block entirely. `parse` returns Err (-> None)
    // on i64 overflow, preserving the original behavior.
    let val = std::str::from_utf8(&digits[..consumed])
        .ok()
        .and_then(|intstr| intstr.parse::<i64>().ok());
    (consumed, val)
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
/// (formatter_unicode.c). Sentinel values mark "not specified":
/// '\0' for the char fields and -1 for width/precision.
struct FmtPy {
    pub fill: char,      // padding character, default ' '
    pub align: char,     // '<', '^', '>', '=', or '\0' if unset
    pub alternate: bool, // '#' flag
    pub sign: char,      // '+', '-', ' ', or '\0' if unset
    pub width: i64,      // minimum field width, -1 if unset
    pub thousands: bool, // ',' flag
    pub precision: i64,  // precision after '.', -1 if unset
    pub ty: char,        // type character, '\0' if unset
}
/// Parse the format-spec half of a pattern (everything after the ':')
/// using CPython's grammar:
/// `[[fill]align][sign][#][0][width][,][.precision][type]`.
/// Unset fields keep sentinel values: '\0' for chars, -1 for numbers.
fn parse_like_python(rest: &str) -> Result<FmtPy> {
    // The rest of this was pretty much strait up copied from python's format parser
    // All credit goes to python source file: formatter_unicode.c
    //
    let mut format = FmtPy {
        fill: ' ',
        align: '\0',
        alternate: false,
        sign: '\0',
        width: -1,
        thousands: false,
        precision: -1,
        ty: '\0',
    };
    let mut chars = rest.chars();
    // The first char might be a (possibly multi-byte) fill character.
    let fake_fill = match chars.next() {
        Some(c) => c,
        None => return Ok(format),
    };
    // from now on all format characters MUST be valid
    // ASCII characters (fill and identifier were the
    // only ones that weren't.
    // Therefore we can use bytes for the rest
    let rest = rest.as_bytes();
    let mut align_specified = false;
    let mut fill_specified = false;
    let end: usize = rest.len();
    let mut pos: usize = 0;
    // If the second char is an alignment token,
    // then fake_fill as fill
    if end - pos >= 1 + fake_fill.len_utf8()
        && is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
    {
        format.align = rest[pos + fake_fill.len_utf8()] as char;
        format.fill = fake_fill;
        fill_specified = true;
        align_specified = true;
        pos += 1 + fake_fill.len_utf8();
    } else if end - pos >= 1 && is_alignment_token(fake_fill) {
        format.align = fake_fill;
        pos += fake_fill.len_utf8();
    }
    // Parse the various sign options
    if end - pos >= 1 && is_sign_element(rest[pos] as char) {
        format.sign = rest[pos] as char;
        pos += 1;
    }
    // If the next character is #, we're in alternate mode. This only
    // applies to integers.
    if end - pos >= 1 && rest[pos] as char == '#' {
        format.alternate = true;
        pos += 1;
    }
    // The special case for 0-padding (backwards compat)
    if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
        format.fill = '0';
        if !align_specified {
            format.align = '=';
        }
        pos += 1;
    }
    // check to make sure that val is good
    let (consumed, val) = get_integer(rest, pos);
    pos += consumed;
    if consumed != 0 {
        match val {
            None => {
                return Err(FmtError::Invalid(
                    "overflow error when parsing width".to_string(),
                ))
            }
            Some(v) => {
                format.width = v;
            }
        }
    }
    // Comma signifies add thousands separators
    if end - pos > 0 && rest[pos] as char == ',' {
        format.thousands = true;
        pos += 1;
    }
    // Parse field precision
    if end - pos > 0 && rest[pos] as char == '.' {
        pos += 1;
        let (consumed, val) = get_integer(rest, pos);
        if consumed != 0 {
            match val {
                None => {
                    return Err(FmtError::Invalid(
                        "overflow error when parsing precision".to_string(),
                    ))
                }
                Some(v) => {
                    format.precision = v;
                }
            }
        } else {
            // Not having a precision after a dot is an error.
            if consumed == 0 {
                return Err(FmtError::Invalid(
                    "Format specifier missing precision".to_string(),
                ));
            }
        }
        pos += consumed;
    }
    // Finally, parse the type field.
    if end - pos > 1 {
        // More than one char remain, invalid format specifier.
        return Err(FmtError::Invalid("Invalid format specifier".to_string()));
    }
    if end - pos == 1 {
        format.ty = rest[pos] as char;
        if !is_type_element(format.ty) {
            let mut msg = String::new();
            write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
            return Err(FmtError::TypeError(msg));
        }
        // pos+=1;
    }
    // Do as much validating as we can, just by looking at the format
    // specifier. Do not take into account what type of formatting
    // we're doing (int, float, string).
    if format.thousands {
        match format.ty {
            'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
            _ => {
                let mut msg = String::new();
                write!(msg, "Invalid comma type: {}", format.ty).unwrap();
                return Err(FmtError::Invalid(msg));
            }
        }
    }
    Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
    /// create Formatter from format string
    ///
    /// `s` is the text between a pair of braces, e.g. `"key:<10.2f"`.
    /// Everything up to the first ':' is the identifier (key); the rest
    /// is parsed as a python-style spec by `parse_like_python`.
    /// `buff` is the shared output buffer this formatter writes into.
    pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
        let mut found_colon = false;
        let mut chars = s.chars();
        let mut c = match chars.next() {
            Some(':') | None => {
                // Empty pattern, or one starting with ':', has no key.
                return Err(FmtError::Invalid("must specify identifier".to_string()))
            }
            Some(c) => c,
        };
        let mut consumed = 0;
        // find the identifier
        loop {
            consumed += c.len_utf8();
            if c == ':' {
                found_colon = true;
                break;
            }
            c = match chars.next() {
                Some(c) => c,
                None => {
                    break;
                }
            };
        }
        let (identifier, rest) = s.split_at(consumed);
        let identifier = if found_colon {
            let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
            i
        } else {
            identifier
        };
        let format = parse_like_python(rest)?;
        // Translate the raw sentinel-based FmtPy into the typed Formatter.
        Ok(Formatter {
            key: identifier,
            fill: format.fill,
            align: match format.align {
                '\0' => Alignment::Unspecified,
                '<' => Alignment::Left,
                '^' => Alignment::Center,
                '>' => Alignment::Right,
                '=' => Alignment::Equal,
                _ => unreachable!(),
            },
            sign: match format.sign {
                '\0' => Sign::Unspecified,
                '+' => Sign::Plus,
                '-' => Sign::Minus,
                ' ' => Sign::Space,
                _ => unreachable!(),
            },
            alternate: format.alternate,
            width: match format.width {
                -1 => None,
                _ => Some(format.width as usize),
            },
            thousands: format.thousands,
            precision: match format.precision {
                -1 => None,
                _ => Some(format.precision as usize),
            },
            ty: match format.ty {
                '\0' => None,
                _ => Some(format.ty),
            },
            buff: buff,
            pattern: s,
        })
    }
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
    /// fill getter
    ///
    /// Returns the padding character (space unless the spec set one).
    pub fn fill(&self) -> char {
        self.fill
    }
    /// align getter
    // NOTE(review): returns a clone; Alignment is presumably not Copy —
    // confirm against its definition in types.rs.
    pub fn align(&self) -> Alignment {
        self.align.clone()
    }
    // provide default for unspecified alignment
    // (per the struct docs: Right for numbers, Left for strings).
    // Only takes effect when the pattern itself did not set one.
    pub fn set_default_align(&mut self, align: Alignment) {
        if self.align == Alignment::Unspecified {
            self.align = align
        }
    }
    /// width getter
    ///
    /// `None` when the spec gave no minimum field width.
    pub fn width(&self) -> Option<usize> {
        self.width
    }
    /// thousands getter
    ///
    /// True when the spec contained the ',' separator flag.
    pub fn thousands(&self) -> bool {
        self.thousands
    }
    /// precision getter
    ///
    /// `None` when the spec gave no `.precision`.
    pub fn precision(&self) -> Option<usize> {
        self.precision
    }
    /// set precision to None, used for formatting int, float, etc
    pub fn set_precision(&mut self, precision: Option<usize>) {
        self.precision = precision;
    }
    /// sign getter
    // NOTE(review): returns a clone; Sign is presumably not Copy —
    // confirm against its definition in types.rs.
    pub fn sign(&self) -> Sign {
        self.sign.clone()
    }
    /// sign plus getter
    /// here because it is in fmt::Formatter
    ///
    /// True only when the spec explicitly requested '+'.
    pub fn sign_plus(&self) -> bool {
        self.sign == Sign::Plus
    }
    /// sign minus getter
    /// here because it is in fmt::Formatter
    ///
    /// True only when the spec explicitly requested '-'.
    pub fn sign_minus(&self) -> bool {
        self.sign == Sign::Minus
    }
/// alternate getter
pub fn | (&self) -> bool {
self.alternate
}
// sign_aware_zero_pad // Not supported
    /// type getter
    ///
    /// The spec's trailing type char (e.g. 'x', 'f'); `None` if absent.
    pub fn ty(&self) -> Option<char> {
        self.ty
    }
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting integers
pub fn is_int_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'b' | 'o' | 'x' | 'X' => true,
_ => false,
},
}
}
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting floats
pub fn is_float_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'f' | 'e' | 'E' => true,
_ => false,
},
}
}
}
impl<'a, 'b> fmt::Write for Formatter<'a, 'b> {
    /// Forward writes straight into the shared output buffer.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.buff.write_str(s)
    }
}
| alternate | identifier_name |
formatter.rs | use std::fmt;
use std::fmt::Write;
use std::iter::Iterator;
use std::str;
use std::string::String;
use types::*;
#[derive(Debug, PartialEq)]
pub struct Formatter<'a, 'b> {
pub key: &'a str,
fill: char,
align: Alignment, // default Right for numbers, Left for strings
sign: Sign,
alternate: bool,
width: Option<usize>,
thousands: bool,
precision: Option<usize>,
ty: Option<char>,
buff: &'b mut String,
pattern: &'a str,
}
fn is_alignment_token(c: char) -> bool {
match c {
'=' | '<' | '^' | '>' => true,
_ => false,
}
}
fn is_sign_element(c: char) -> bool {
match c {
' ' | '-' | '+' => true,
_ => false,
}
}
fn is_type_element(c: char) -> bool {
match c {
'b' | 'o' | 'x' | 'X' | 'e' | 'E' | 'f' | 'F' | '%' | 's' | '?' => true,
_ => false,
}
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
let (_, rest) = s.split_at(pos);
let mut consumed: usize = 0;
for b in rest {
match *b as char {
'0'..='9' => {}
_ => break,
};
consumed += 1;
}
if consumed == 0 {
(0, None)
} else {
let (intstr, _) = rest.split_at(consumed);
let val = unsafe {
// I think I can be reasonably sure that 0-9 chars are utf8 :)
match str::from_utf8_unchecked(intstr).parse::<i64>() {
Ok(v) => Some(v),
Err(_) => None,
}
};
(consumed, val)
}
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
struct FmtPy {
pub fill: char,
pub align: char,
pub alternate: bool,
pub sign: char,
pub width: i64,
pub thousands: bool,
pub precision: i64,
pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
// The rest of this was pretty much strait up copied from python's format parser
// All credit goes to python source file: formatter_unicode.c
//
let mut format = FmtPy {
fill: ' ',
align: '\0',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: '\0',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let rest = rest.as_bytes();
let mut align_specified = false;
let mut fill_specified = false;
let end: usize = rest.len();
let mut pos: usize = 0;
// If the second char is an alignment token,
// then fake_fill as fill
if end - pos >= 1 + fake_fill.len_utf8()
&& is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
{
format.align = rest[pos + fake_fill.len_utf8()] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 1 + fake_fill.len_utf8();
} else if end - pos >= 1 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos += fake_fill.len_utf8();
}
// Parse the various sign options
if end - pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos += 1;
}
// If the next character is #, we're in alternate mode. This only
// applies to integers.
if end - pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos += 1;
}
// The special case for 0-padding (backwards compat)
if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if !align_specified {
format.align = '=';
}
pos += 1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing width".to_string(),
))
}
Some(v) => {
format.width = v;
}
}
}
// Comma signifies add thousands separators
if end - pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos += 1;
}
// Parse field precision
if end - pos > 0 && rest[pos] as char == '.' {
pos += 1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing precision".to_string(),
))
}
Some(v) => |
}
} else {
// Not having a precision after a dot is an error.
if consumed == 0 {
return Err(FmtError::Invalid(
"Format specifier missing precision".to_string(),
));
}
}
pos += consumed;
}
// Finally, parse the type field.
if end - pos > 1 {
// More than one char remain, invalid format specifier.
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end - pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::TypeError(msg));
}
// pos+=1;
}
// Do as much validating as we can, just by looking at the format
// specifier. Do not take into account what type of formatting
// we're doing (int, float, string).
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
/// create Formatter from format string
pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
let mut found_colon = false;
let mut chars = s.chars();
let mut c = match chars.next() {
Some(':') | None => {
return Err(FmtError::Invalid("must specify identifier".to_string()))
}
Some(c) => c,
};
let mut consumed = 0;
// find the identifier
loop {
consumed += c.len_utf8();
if c == ':' {
found_colon = true;
break;
}
c = match chars.next() {
Some(c) => c,
None => {
break;
}
};
}
let (identifier, rest) = s.split_at(consumed);
let identifier = if found_colon {
let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
i
} else {
identifier
};
let format = parse_like_python(rest)?;
Ok(Formatter {
key: identifier,
fill: format.fill,
align: match format.align {
'\0' => Alignment::Unspecified,
'<' => Alignment::Left,
'^' => Alignment::Center,
'>' => Alignment::Right,
'=' => Alignment::Equal,
_ => unreachable!(),
},
sign: match format.sign {
'\0' => Sign::Unspecified,
'+' => Sign::Plus,
'-' => Sign::Minus,
' ' => Sign::Space,
_ => unreachable!(),
},
alternate: format.alternate,
width: match format.width {
-1 => None,
_ => Some(format.width as usize),
},
thousands: format.thousands,
precision: match format.precision {
-1 => None,
_ => Some(format.precision as usize),
},
ty: match format.ty {
'\0' => None,
_ => Some(format.ty),
},
buff: buff,
pattern: s,
})
}
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
/// fill getter
pub fn fill(&self) -> char {
self.fill
}
/// align getter
pub fn align(&self) -> Alignment {
self.align.clone()
}
// provide default for unspecified alignment
pub fn set_default_align(&mut self, align: Alignment) {
if self.align == Alignment::Unspecified {
self.align = align
}
}
/// width getter
pub fn width(&self) -> Option<usize> {
self.width
}
/// thousands getter
pub fn thousands(&self) -> bool {
self.thousands
}
/// precision getter
pub fn precision(&self) -> Option<usize> {
self.precision
}
/// set precision to None, used for formatting int, float, etc
pub fn set_precision(&mut self, precision: Option<usize>) {
self.precision = precision;
}
/// sign getter
pub fn sign(&self) -> Sign {
self.sign.clone()
}
/// sign plus getter
/// here because it is in fmt::Formatter
pub fn sign_plus(&self) -> bool {
self.sign == Sign::Plus
}
/// sign minus getter
/// here because it is in fmt::Formatter
pub fn sign_minus(&self) -> bool {
self.sign == Sign::Minus
}
/// alternate getter
pub fn alternate(&self) -> bool {
self.alternate
}
// sign_aware_zero_pad // Not supported
/// type getter
pub fn ty(&self) -> Option<char> {
self.ty
}
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting integers
pub fn is_int_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'b' | 'o' | 'x' | 'X' => true,
_ => false,
},
}
}
/// UNSTABLE: in the future, this may return true if all validty
/// checks for a float return true
/// return true if ty is valid for formatting floats
pub fn is_float_type(&self) -> bool {
match self.ty {
None => true,
Some(c) => match c {
'f' | 'e' | 'E' => true,
_ => false,
},
}
}
}
impl<'a, 'b> fmt::Write for Formatter<'a, 'b> {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.buff.write_str(s)
}
}
| {
format.precision = v;
} | conditional_block |
server.go | package shardmaster | )
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "sync/atomic"
import "os"
import "syscall"
import "encoding/gob"
const Debug = false
var log_mu sync.Mutex
// Logf prints a debug line for this server, color-coded by server id so
// interleaved output from several servers stays readable. It is a no-op
// unless Debug is set, and holds a global mutex so lines from different
// servers do not interleave mid-line.
func (sm *ShardMaster) Logf(format string, a ...interface{}) {
	if !Debug {
		return
	}
	log_mu.Lock()
	defer log_mu.Unlock()
	me := sm.me
	// ANSI color escape picked from the server id (codes 31-36).
	fmt.Printf("\x1b[%dm", (me%6)+31)
	fmt.Printf("SM#%d : ", me)
	fmt.Printf(format+"\n", a...)
	fmt.Printf("\x1b[0m")
}
// ShardMaster replicates the shard-to-group configuration history via
// paxos. All mutations go through the single sequentialApplier goroutine.
type ShardMaster struct {
	mu         sync.Mutex
	l          net.Listener
	me         int
	dead       int32 // for testing
	unreliable int32 // for testing
	px         *paxos.Paxos
	opReqChan chan OpReq // RPC handlers hand ops to sequentialApplier here
	lastDummySeq int //Seq of last time we launched a dummy op to fill a hole
	activeGIDs map[int64]bool //If inactive remove from map instead of setting to false
	lastConfig int      // index of the newest entry in configs
	configs []Config // indexed by config num
}
// Op is the value agreed on in the paxos log. OpID uniquely identifies a
// request so a proposer can tell whether its own proposal won a slot.
type Op struct {
	OpID    int64
	Type    OpType
	GID     int64    //Used by all Ops but Query
	Servers []string //Used by Join
	Shard   int      //Used by move
	Num     int      //Used by Query
}
// OpType enumerates the kinds of operations stored in the paxos log.
type OpType int

const (
	// iota + 1 leaves the zero value unnamed: a zero Op{} (used as a
	// hole-filling dummy in ping) matches no case in applyOp's switch.
	JoinOp OpType = iota + 1
	LeaveOp
	MoveOp
	QueryOp
	Dummy
)
// OpReq pairs an operation with the channel its reply is delivered on
// (a Config for queries; an empty Config as an ack otherwise).
type OpReq struct {
	op        Op
	replyChan chan Config
}
// sequentialApplier is the single goroutine that owns the paxos log: it
// serializes every client operation arriving on opReqChan into the log,
// applies decided entries in order, and periodically (every 50ms of
// inactivity) catches up on entries decided by peers via ping. Funnelling
// everything through one goroutine avoids per-field locking.
func (sm *ShardMaster) sequentialApplier() {
	seq := 1
	for !sm.isdead() {
		select {
		case opreq := <-sm.opReqChan:
			op := opreq.op
			sm.Logf("Got operation through channel")
			seq = sm.addToPaxos(seq, op)
			sm.Logf("Operation added to paxos log at %d", seq)
			if op.Type == QueryOp {
				if op.Num < 0 {
					// Negative Num means "latest configuration".
					opreq.replyChan <- sm.configs[sm.lastConfig]
					sm.Logf("Query applied! Feeding value config nr %d through channel. %d", sm.lastConfig, seq)
				} else {
					opreq.replyChan <- sm.configs[op.Num]
					sm.Logf("Query applied! Feeding value config nr %d through channel. %d", op.Num, seq)
				}
			} else {
				// Non-query ops get an empty Config as an ack.
				opreq.replyChan <- Config{}
			}
		case <-time.After(50 * time.Millisecond):
			sm.Logf("Ping")
			seq = sm.ping(seq)
		}
		// Let paxos forget instances below seq-1.
		// Fixed: the log previously said Done(seq-2) while the code
		// actually calls Done(seq-1).
		sm.Logf("Calling Done(%d)", seq-1)
		sm.px.Done(seq - 1)
	}
}
//Takes the last non-applied seq and returns the new one
//
// ping catches the local state up with the paxos log: starting at seq it
// applies every already-decided instance; if paxos has decided instances
// beyond seq (a hole we never voted on), it proposes a no-op to force the
// hole to a decision. Returns -1 only if the server died mid-loop.
func (sm *ShardMaster) ping(seq int) int {
	//TODO: Is this a good dummy OP?
	dummyOp := Op{}
	for !sm.isdead() {
		fate, val := sm.px.Status(seq)
		if fate == paxos.Decided {
			sm.applyOp(val.(Op))
			seq++
			continue
		}
		// lastDummySeq guards against proposing a dummy for the same
		// hole more than once.
		if sm.px.Max() > seq && seq > sm.lastDummySeq {
			sm.px.Start(seq, dummyOp)
			sm.waitForPaxos(seq)
			sm.lastDummySeq = seq
		} else {
			return seq
		}
	}
	sm.Logf("ERRRRORR: Ping fallthrough, we are dying! Return seq -1 ")
	return -1
}
// addToPaxos proposes op at successive slots starting from seq until op
// itself is the decided value for some slot. Every decided value seen on
// the way (ours or a competitor's) is applied in log order, so state stays
// consistent. Returns the next free seq, or -1 if the server died first.
func (sm *ShardMaster) addToPaxos(seq int, op Op) (retseq int) {
	for !sm.isdead() {
		//Suggest OP as next seq
		sm.px.Start(seq, op)
		val, err := sm.waitForPaxos(seq)
		if err != nil {
			// Wait failed (timeout/shutdown); retry the same slot.
			sm.Logf("ERRRROROOROROO!!!")
			continue
		}
		sm.applyOp(val.(Op))
		seq++
		//Did work? (OpID identifies our proposal uniquely)
		if val.(Op).OpID == op.OpID {
			sm.Logf("Applied operation in log at seq %d", seq-1)
			return seq
		} else {
			sm.Logf("Somebody else took seq %d before us, applying it and trying again", seq-1)
		}
	}
	return -1
}
// waitForPaxos blocks until the paxos instance at seq is decided and
// returns the decided value. It polls with exponential backoff starting
// at 10ms; it errors out once the interval exceeds 3s, or immediately if
// the server is shutting down or the instance was already forgotten.
func (sm *ShardMaster) waitForPaxos(seq int) (val interface{}, err error) {
	var status paxos.Fate
	to := 10 * time.Millisecond
	for {
		status, val = sm.px.Status(seq)
		if status == paxos.Decided {
			err = nil
			return
		}
		if status == paxos.Forgotten || sm.isdead() {
			err = fmt.Errorf("We are dead or waiting for something forgotten. Server shutting down?")
			sm.Logf("We are dead or waiting for something forgotten. Server shutting down?")
			return
		}
		sm.Logf("Still waiting for paxos: %d", seq)
		time.Sleep(to)
		if to < 3*time.Second {
			to *= 2
		} else {
			// Fixed typo in the error message ("timeout!1").
			err = fmt.Errorf("Wait for paxos timeout!")
			return
		}
	}
}
// applyOp applies one decided log entry to local configuration state.
// Query and Dummy entries (and the zero-valued dummy Op{}) mutate nothing.
func (sm *ShardMaster) applyOp(op Op) {
	sm.Logf("Applying op to database")
	switch op.Type {
	case JoinOp:
		sm.Logf("Join, you guys!")
		sm.ApplyJoin(op.GID, op.Servers)
	case LeaveOp:
		sm.ApplyLeave(op.GID)
		sm.Logf("Leave op applied!")
	case MoveOp:
		sm.ApplyMove(op.GID, op.Shard)
		sm.Logf("Move op applied!")
	case QueryOp:
		//Do nothing
	case Dummy:
		//Do nothing
	}
}
// ApplyMove appends a new configuration identical to the current one
// except that Shard is assigned to group GID. No rebalancing happens:
// a Move is an explicit administrative placement.
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
	newConfig := sm.makeNewConfig()
	newConfig.Shards[Shard] = GID
	sm.configs = append(sm.configs, newConfig)
}
// ApplyJoin marks GID active, appends a new configuration including the
// group's servers, and rebalances shards across all active groups.
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
	sm.activeGIDs[GID] = true
	newConfig := sm.makeNewConfig()
	newConfig.Groups[GID] = Servers
	sm.rebalanceShards(&newConfig)
	sm.configs = append(sm.configs, newConfig)
}
// ApplyLeave removes GID from the active set, reassigns its shards to the
// invalid placeholder group 0, and rebalances so those shards are
// redistributed among the remaining groups in the appended configuration.
func (sm *ShardMaster) ApplyLeave(GID int64) {
	delete(sm.activeGIDs, GID)
	newConfig := sm.makeNewConfig()
	delete(newConfig.Groups, GID)
	for i, group := range newConfig.Shards {
		if group == GID {
			newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
		}
	}
	sm.rebalanceShards(&newConfig)
	sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
continue
}
minGUID, minGroupSize = getMin(groupToShards)
maxGUID, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d (GUID: %d) ", minGroupSize, minGUID)
sm.Logf("max %d (GUID: %d) ", maxGroupSize, maxGUID)
maxCanGive := maxGroupSize - minShardsPerGroup
minNeeds := minShardsPerGroup - minGroupSize
shardsToMove := minNeeds
if maxCanGive < minNeeds {
shardsToMove = maxCanGive
}
sm.Logf("Moving %d shards: minNeeds %d, maxCanGive %d!", shardsToMove, minNeeds, maxCanGive)
for i := 0; i < shardsToMove; i++ {
moveshard := groupToShards[maxGUID][i]
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
groupToShards[maxGUID] = sliceDel(groupToShards[maxGUID], i)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
}
}
func getMax(groupToShards map[int64][]int) (GUID int64, nShards int) {
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) >= nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func getMin(groupToShards map[int64][]int) (GUID int64, nShards int) {
nShards = 2147483647 //Max signed int32 int is 32 or 64, so this will fit
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) < nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func sliceDel(a []int, i int) []int {
return append(a[:i], a[i+1:]...)
}
func sliceDelInt64(a []int64, i int) []int64 {
return append(a[:i], a[i+1:]...)
}
func (sm *ShardMaster) makeNewConfig() Config {
oldConfig := sm.configs[sm.lastConfig]
newConfig := Config{Groups: make(map[int64][]string)}
newConfig.Num = oldConfig.Num + 1
sm.lastConfig = newConfig.Num
newConfig.Shards = oldConfig.Shards //TODO: Does this work?
for k, v := range oldConfig.Groups {
newConfig.Groups[k] = v
}
return newConfig
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
op := Op{Type: JoinOp, OpID: rand.Int63(), GID: args.GID, Servers: args.Servers}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
op := Op{Type: LeaveOp, OpID: rand.Int63(), GID: args.GID}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
op := Op{Type: MoveOp, OpID: rand.Int63(), GID: args.GID, Shard: args.Shard}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error {
op := Op{Type: QueryOp, OpID: rand.Int63(), Num: args.Num}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
reply.Config = <-opReq.replyChan
sm.Logf("Got return!")
return nil
}
// please don't change these two functions.
func (sm *ShardMaster) Kill() {
atomic.StoreInt32(&sm.dead, 1)
sm.l.Close()
sm.px.Kill()
}
// call this to find out if the server is dead.
func (sm *ShardMaster) isdead() bool {
return atomic.LoadInt32(&sm.dead) != 0
}
// please do not change these two functions.
func (sm *ShardMaster) setunreliable(what bool) {
if what {
atomic.StoreInt32(&sm.unreliable, 1)
} else {
atomic.StoreInt32(&sm.unreliable, 0)
}
}
func (sm *ShardMaster) isunreliable() bool {
return atomic.LoadInt32(&sm.unreliable) != 0
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int64][]string{}
sm.activeGIDs = map[int64]bool{}
sm.opReqChan = make(chan OpReq)
rpcs := rpc.NewServer()
gob.Register(Op{})
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
go sm.sequentialApplier()
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.isdead() == false {
conn, err := sm.l.Accept()
if err == nil && sm.isdead() == false {
if sm.isunreliable() && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.isunreliable() && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.isdead() == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
return sm
} |
import (
"math/rand"
"net"
"time" | random_line_split |
server.go | package shardmaster
import (
"math/rand"
"net"
"time"
)
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "sync/atomic"
import "os"
import "syscall"
import "encoding/gob"
const Debug = false
var log_mu sync.Mutex
func (sm *ShardMaster) Logf(format string, a ...interface{}) {
if !Debug {
return
}
log_mu.Lock()
defer log_mu.Unlock()
me := sm.me
fmt.Printf("\x1b[%dm", (me%6)+31)
fmt.Printf("SM#%d : ", me)
fmt.Printf(format+"\n", a...)
fmt.Printf("\x1b[0m")
}
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead int32 // for testing
unreliable int32 // for testing
px *paxos.Paxos
opReqChan chan OpReq
lastDummySeq int //Seq of last time we launched a dummy op to fill a hole
activeGIDs map[int64]bool //If inactive remove from map instead of setting to false
lastConfig int
configs []Config // indexed by config num
}
type Op struct {
OpID int64
Type OpType
GID int64 //Used by all Ops but Query
Servers []string //Used by Join
Shard int //Used by move
Num int //Used by Query
}
type OpType int
const (
JoinOp OpType = iota + 1
LeaveOp
MoveOp
QueryOp
Dummy
)
type OpReq struct {
op Op
replyChan chan Config
}
func (sm *ShardMaster) sequentialApplier() {
seq := 1
for !sm.isdead() {
select {
case opreq := <-sm.opReqChan:
op := opreq.op
sm.Logf("Got operation through channel")
seq = sm.addToPaxos(seq, op)
sm.Logf("Operation added to paxos log at %d", seq)
if op.Type == QueryOp {
if op.Num < 0 {
//Returning latest config
opreq.replyChan <- sm.configs[sm.lastConfig]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", sm.lastConfig, seq)
} else {
opreq.replyChan <- sm.configs[op.Num]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", op.Num, seq)
}
} else {
opreq.replyChan <- Config{}
}
case <-time.After(50 * time.Millisecond):
sm.Logf("Ping")
seq = sm.ping(seq)
}
sm.Logf("Calling Done(%d)", seq-2)
sm.px.Done(seq - 1)
}
}
//Takes the last non-applied seq and returns the new one
func (sm *ShardMaster) ping(seq int) int {
//TODO: Is this a good dummy OP?
dummyOp := Op{}
for !sm.isdead() {
fate, val := sm.px.Status(seq)
if fate == paxos.Decided {
sm.applyOp(val.(Op))
seq++
continue
}
if sm.px.Max() > seq && seq > sm.lastDummySeq {
sm.px.Start(seq, dummyOp)
sm.waitForPaxos(seq)
sm.lastDummySeq = seq
} else {
return seq
}
}
sm.Logf("ERRRRORR: Ping fallthrough, we are dying! Return seq -1 ")
return -1
}
func (sm *ShardMaster) addToPaxos(seq int, op Op) (retseq int) {
for !sm.isdead() {
//Suggest OP as next seq
sm.px.Start(seq, op)
val, err := sm.waitForPaxos(seq)
if err != nil {
sm.Logf("ERRRROROOROROO!!!")
continue
}
sm.applyOp(val.(Op))
seq++
//Did work?
if val.(Op).OpID == op.OpID {
sm.Logf("Applied operation in log at seq %d", seq-1)
return seq
} else {
sm.Logf("Somebody else took seq %d before us, applying it and trying again", seq-1)
}
}
return -1
}
func (sm *ShardMaster) waitForPaxos(seq int) (val interface{}, err error) {
var status paxos.Fate
to := 10 * time.Millisecond
for {
status, val = sm.px.Status(seq)
if status == paxos.Decided {
err = nil
return
}
if status == paxos.Forgotten || sm.isdead() {
err = fmt.Errorf("We are dead or waiting for something forgotten. Server shutting down?")
sm.Logf("We are dead or waiting for something forgotten. Server shutting down?")
return
}
sm.Logf("Still waiting for paxos: %d", seq)
time.Sleep(to)
if to < 3*time.Second {
to *= 2
} else {
err = fmt.Errorf("Wait for paxos timeout!1")
return
}
}
}
func (sm *ShardMaster) applyOp(op Op) {
sm.Logf("Applying op to database")
switch op.Type {
case JoinOp:
sm.Logf("Join, you guys!")
sm.ApplyJoin(op.GID, op.Servers)
case LeaveOp:
sm.ApplyLeave(op.GID)
sm.Logf("Leave op applied!")
case MoveOp:
sm.ApplyMove(op.GID, op.Shard)
sm.Logf("Move op applied!")
case QueryOp:
//Do nothing
case Dummy:
//Do nothing
}
}
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
newConfig := sm.makeNewConfig()
newConfig.Shards[Shard] = GID
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
sm.activeGIDs[GID] = true
newConfig := sm.makeNewConfig()
newConfig.Groups[GID] = Servers
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyLeave(GID int64) {
delete(sm.activeGIDs, GID)
newConfig := sm.makeNewConfig()
delete(newConfig.Groups, GID)
for i, group := range newConfig.Shards {
if group == GID {
newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
}
}
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
continue
}
minGUID, minGroupSize = getMin(groupToShards)
maxGUID, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d (GUID: %d) ", minGroupSize, minGUID)
sm.Logf("max %d (GUID: %d) ", maxGroupSize, maxGUID)
maxCanGive := maxGroupSize - minShardsPerGroup
minNeeds := minShardsPerGroup - minGroupSize
shardsToMove := minNeeds
if maxCanGive < minNeeds {
shardsToMove = maxCanGive
}
sm.Logf("Moving %d shards: minNeeds %d, maxCanGive %d!", shardsToMove, minNeeds, maxCanGive)
for i := 0; i < shardsToMove; i++ {
moveshard := groupToShards[maxGUID][i]
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
groupToShards[maxGUID] = sliceDel(groupToShards[maxGUID], i)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
}
}
func getMax(groupToShards map[int64][]int) (GUID int64, nShards int) {
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) >= nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func getMin(groupToShards map[int64][]int) (GUID int64, nShards int) {
nShards = 2147483647 //Max signed int32 int is 32 or 64, so this will fit
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) < nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func sliceDel(a []int, i int) []int {
return append(a[:i], a[i+1:]...)
}
func sliceDelInt64(a []int64, i int) []int64 {
return append(a[:i], a[i+1:]...)
}
func (sm *ShardMaster) makeNewConfig() Config {
oldConfig := sm.configs[sm.lastConfig]
newConfig := Config{Groups: make(map[int64][]string)}
newConfig.Num = oldConfig.Num + 1
sm.lastConfig = newConfig.Num
newConfig.Shards = oldConfig.Shards //TODO: Does this work?
for k, v := range oldConfig.Groups {
newConfig.Groups[k] = v
}
return newConfig
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
op := Op{Type: JoinOp, OpID: rand.Int63(), GID: args.GID, Servers: args.Servers}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
op := Op{Type: LeaveOp, OpID: rand.Int63(), GID: args.GID}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
op := Op{Type: MoveOp, OpID: rand.Int63(), GID: args.GID, Shard: args.Shard}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error |
// please don't change these two functions.
func (sm *ShardMaster) Kill() {
atomic.StoreInt32(&sm.dead, 1)
sm.l.Close()
sm.px.Kill()
}
// call this to find out if the server is dead.
func (sm *ShardMaster) isdead() bool {
return atomic.LoadInt32(&sm.dead) != 0
}
// please do not change these two functions.
func (sm *ShardMaster) setunreliable(what bool) {
if what {
atomic.StoreInt32(&sm.unreliable, 1)
} else {
atomic.StoreInt32(&sm.unreliable, 0)
}
}
func (sm *ShardMaster) isunreliable() bool {
return atomic.LoadInt32(&sm.unreliable) != 0
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int64][]string{}
sm.activeGIDs = map[int64]bool{}
sm.opReqChan = make(chan OpReq)
rpcs := rpc.NewServer()
gob.Register(Op{})
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
go sm.sequentialApplier()
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.isdead() == false {
conn, err := sm.l.Accept()
if err == nil && sm.isdead() == false {
if sm.isunreliable() && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.isunreliable() && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.isdead() == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
return sm
}
| {
op := Op{Type: QueryOp, OpID: rand.Int63(), Num: args.Num}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
reply.Config = <-opReq.replyChan
sm.Logf("Got return!")
return nil
} | identifier_body |
server.go | package shardmaster
import (
"math/rand"
"net"
"time"
)
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "sync/atomic"
import "os"
import "syscall"
import "encoding/gob"
const Debug = false
var log_mu sync.Mutex
func (sm *ShardMaster) Logf(format string, a ...interface{}) {
if !Debug {
return
}
log_mu.Lock()
defer log_mu.Unlock()
me := sm.me
fmt.Printf("\x1b[%dm", (me%6)+31)
fmt.Printf("SM#%d : ", me)
fmt.Printf(format+"\n", a...)
fmt.Printf("\x1b[0m")
}
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead int32 // for testing
unreliable int32 // for testing
px *paxos.Paxos
opReqChan chan OpReq
lastDummySeq int //Seq of last time we launched a dummy op to fill a hole
activeGIDs map[int64]bool //If inactive remove from map instead of setting to false
lastConfig int
configs []Config // indexed by config num
}
type Op struct {
OpID int64
Type OpType
GID int64 //Used by all Ops but Query
Servers []string //Used by Join
Shard int //Used by move
Num int //Used by Query
}
type OpType int
const (
JoinOp OpType = iota + 1
LeaveOp
MoveOp
QueryOp
Dummy
)
type OpReq struct {
op Op
replyChan chan Config
}
func (sm *ShardMaster) sequentialApplier() {
seq := 1
for !sm.isdead() {
select {
case opreq := <-sm.opReqChan:
op := opreq.op
sm.Logf("Got operation through channel")
seq = sm.addToPaxos(seq, op)
sm.Logf("Operation added to paxos log at %d", seq)
if op.Type == QueryOp {
if op.Num < 0 {
//Returning latest config
opreq.replyChan <- sm.configs[sm.lastConfig]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", sm.lastConfig, seq)
} else {
opreq.replyChan <- sm.configs[op.Num]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", op.Num, seq)
}
} else {
opreq.replyChan <- Config{}
}
case <-time.After(50 * time.Millisecond):
sm.Logf("Ping")
seq = sm.ping(seq)
}
sm.Logf("Calling Done(%d)", seq-2)
sm.px.Done(seq - 1)
}
}
//Takes the last non-applied seq and returns the new one
func (sm *ShardMaster) | (seq int) int {
//TODO: Is this a good dummy OP?
dummyOp := Op{}
for !sm.isdead() {
fate, val := sm.px.Status(seq)
if fate == paxos.Decided {
sm.applyOp(val.(Op))
seq++
continue
}
if sm.px.Max() > seq && seq > sm.lastDummySeq {
sm.px.Start(seq, dummyOp)
sm.waitForPaxos(seq)
sm.lastDummySeq = seq
} else {
return seq
}
}
sm.Logf("ERRRRORR: Ping fallthrough, we are dying! Return seq -1 ")
return -1
}
func (sm *ShardMaster) addToPaxos(seq int, op Op) (retseq int) {
for !sm.isdead() {
//Suggest OP as next seq
sm.px.Start(seq, op)
val, err := sm.waitForPaxos(seq)
if err != nil {
sm.Logf("ERRRROROOROROO!!!")
continue
}
sm.applyOp(val.(Op))
seq++
//Did work?
if val.(Op).OpID == op.OpID {
sm.Logf("Applied operation in log at seq %d", seq-1)
return seq
} else {
sm.Logf("Somebody else took seq %d before us, applying it and trying again", seq-1)
}
}
return -1
}
func (sm *ShardMaster) waitForPaxos(seq int) (val interface{}, err error) {
var status paxos.Fate
to := 10 * time.Millisecond
for {
status, val = sm.px.Status(seq)
if status == paxos.Decided {
err = nil
return
}
if status == paxos.Forgotten || sm.isdead() {
err = fmt.Errorf("We are dead or waiting for something forgotten. Server shutting down?")
sm.Logf("We are dead or waiting for something forgotten. Server shutting down?")
return
}
sm.Logf("Still waiting for paxos: %d", seq)
time.Sleep(to)
if to < 3*time.Second {
to *= 2
} else {
err = fmt.Errorf("Wait for paxos timeout!1")
return
}
}
}
func (sm *ShardMaster) applyOp(op Op) {
sm.Logf("Applying op to database")
switch op.Type {
case JoinOp:
sm.Logf("Join, you guys!")
sm.ApplyJoin(op.GID, op.Servers)
case LeaveOp:
sm.ApplyLeave(op.GID)
sm.Logf("Leave op applied!")
case MoveOp:
sm.ApplyMove(op.GID, op.Shard)
sm.Logf("Move op applied!")
case QueryOp:
//Do nothing
case Dummy:
//Do nothing
}
}
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
newConfig := sm.makeNewConfig()
newConfig.Shards[Shard] = GID
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
sm.activeGIDs[GID] = true
newConfig := sm.makeNewConfig()
newConfig.Groups[GID] = Servers
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyLeave(GID int64) {
delete(sm.activeGIDs, GID)
newConfig := sm.makeNewConfig()
delete(newConfig.Groups, GID)
for i, group := range newConfig.Shards {
if group == GID {
newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
}
}
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
continue
}
minGUID, minGroupSize = getMin(groupToShards)
maxGUID, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d (GUID: %d) ", minGroupSize, minGUID)
sm.Logf("max %d (GUID: %d) ", maxGroupSize, maxGUID)
maxCanGive := maxGroupSize - minShardsPerGroup
minNeeds := minShardsPerGroup - minGroupSize
shardsToMove := minNeeds
if maxCanGive < minNeeds {
shardsToMove = maxCanGive
}
sm.Logf("Moving %d shards: minNeeds %d, maxCanGive %d!", shardsToMove, minNeeds, maxCanGive)
for i := 0; i < shardsToMove; i++ {
moveshard := groupToShards[maxGUID][i]
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
groupToShards[maxGUID] = sliceDel(groupToShards[maxGUID], i)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
}
}
func getMax(groupToShards map[int64][]int) (GUID int64, nShards int) {
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) >= nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func getMin(groupToShards map[int64][]int) (GUID int64, nShards int) {
nShards = 2147483647 //Max signed int32 int is 32 or 64, so this will fit
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) < nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func sliceDel(a []int, i int) []int {
return append(a[:i], a[i+1:]...)
}
func sliceDelInt64(a []int64, i int) []int64 {
return append(a[:i], a[i+1:]...)
}
func (sm *ShardMaster) makeNewConfig() Config {
oldConfig := sm.configs[sm.lastConfig]
newConfig := Config{Groups: make(map[int64][]string)}
newConfig.Num = oldConfig.Num + 1
sm.lastConfig = newConfig.Num
newConfig.Shards = oldConfig.Shards //TODO: Does this work?
for k, v := range oldConfig.Groups {
newConfig.Groups[k] = v
}
return newConfig
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
op := Op{Type: JoinOp, OpID: rand.Int63(), GID: args.GID, Servers: args.Servers}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
op := Op{Type: LeaveOp, OpID: rand.Int63(), GID: args.GID}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
op := Op{Type: MoveOp, OpID: rand.Int63(), GID: args.GID, Shard: args.Shard}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error {
op := Op{Type: QueryOp, OpID: rand.Int63(), Num: args.Num}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
reply.Config = <-opReq.replyChan
sm.Logf("Got return!")
return nil
}
// please don't change these two functions.
func (sm *ShardMaster) Kill() {
atomic.StoreInt32(&sm.dead, 1)
sm.l.Close()
sm.px.Kill()
}
// call this to find out if the server is dead.
func (sm *ShardMaster) isdead() bool {
return atomic.LoadInt32(&sm.dead) != 0
}
// please do not change these two functions.
func (sm *ShardMaster) setunreliable(what bool) {
if what {
atomic.StoreInt32(&sm.unreliable, 1)
} else {
atomic.StoreInt32(&sm.unreliable, 0)
}
}
func (sm *ShardMaster) isunreliable() bool {
return atomic.LoadInt32(&sm.unreliable) != 0
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int64][]string{}
sm.activeGIDs = map[int64]bool{}
sm.opReqChan = make(chan OpReq)
rpcs := rpc.NewServer()
gob.Register(Op{})
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
go sm.sequentialApplier()
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.isdead() == false {
conn, err := sm.l.Accept()
if err == nil && sm.isdead() == false {
if sm.isunreliable() && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.isunreliable() && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.isdead() == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
return sm
}
| ping | identifier_name |
server.go | package shardmaster
import (
"math/rand"
"net"
"time"
)
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "sync/atomic"
import "os"
import "syscall"
import "encoding/gob"
const Debug = false
var log_mu sync.Mutex
func (sm *ShardMaster) Logf(format string, a ...interface{}) {
if !Debug {
return
}
log_mu.Lock()
defer log_mu.Unlock()
me := sm.me
fmt.Printf("\x1b[%dm", (me%6)+31)
fmt.Printf("SM#%d : ", me)
fmt.Printf(format+"\n", a...)
fmt.Printf("\x1b[0m")
}
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead int32 // for testing
unreliable int32 // for testing
px *paxos.Paxos
opReqChan chan OpReq
lastDummySeq int //Seq of last time we launched a dummy op to fill a hole
activeGIDs map[int64]bool //If inactive remove from map instead of setting to false
lastConfig int
configs []Config // indexed by config num
}
type Op struct {
OpID int64
Type OpType
GID int64 //Used by all Ops but Query
Servers []string //Used by Join
Shard int //Used by move
Num int //Used by Query
}
type OpType int
const (
JoinOp OpType = iota + 1
LeaveOp
MoveOp
QueryOp
Dummy
)
type OpReq struct {
op Op
replyChan chan Config
}
func (sm *ShardMaster) sequentialApplier() {
seq := 1
for !sm.isdead() {
select {
case opreq := <-sm.opReqChan:
op := opreq.op
sm.Logf("Got operation through channel")
seq = sm.addToPaxos(seq, op)
sm.Logf("Operation added to paxos log at %d", seq)
if op.Type == QueryOp {
if op.Num < 0 {
//Returning latest config
opreq.replyChan <- sm.configs[sm.lastConfig]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", sm.lastConfig, seq)
} else {
opreq.replyChan <- sm.configs[op.Num]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", op.Num, seq)
}
} else {
opreq.replyChan <- Config{}
}
case <-time.After(50 * time.Millisecond):
sm.Logf("Ping")
seq = sm.ping(seq)
}
sm.Logf("Calling Done(%d)", seq-2)
sm.px.Done(seq - 1)
}
}
//Takes the last non-applied seq and returns the new one
func (sm *ShardMaster) ping(seq int) int {
//TODO: Is this a good dummy OP?
dummyOp := Op{}
for !sm.isdead() {
fate, val := sm.px.Status(seq)
if fate == paxos.Decided {
sm.applyOp(val.(Op))
seq++
continue
}
if sm.px.Max() > seq && seq > sm.lastDummySeq | else {
return seq
}
}
sm.Logf("ERRRRORR: Ping fallthrough, we are dying! Return seq -1 ")
return -1
}
func (sm *ShardMaster) addToPaxos(seq int, op Op) (retseq int) {
for !sm.isdead() {
//Suggest OP as next seq
sm.px.Start(seq, op)
val, err := sm.waitForPaxos(seq)
if err != nil {
sm.Logf("ERRRROROOROROO!!!")
continue
}
sm.applyOp(val.(Op))
seq++
//Did work?
if val.(Op).OpID == op.OpID {
sm.Logf("Applied operation in log at seq %d", seq-1)
return seq
} else {
sm.Logf("Somebody else took seq %d before us, applying it and trying again", seq-1)
}
}
return -1
}
func (sm *ShardMaster) waitForPaxos(seq int) (val interface{}, err error) {
var status paxos.Fate
to := 10 * time.Millisecond
for {
status, val = sm.px.Status(seq)
if status == paxos.Decided {
err = nil
return
}
if status == paxos.Forgotten || sm.isdead() {
err = fmt.Errorf("We are dead or waiting for something forgotten. Server shutting down?")
sm.Logf("We are dead or waiting for something forgotten. Server shutting down?")
return
}
sm.Logf("Still waiting for paxos: %d", seq)
time.Sleep(to)
if to < 3*time.Second {
to *= 2
} else {
err = fmt.Errorf("Wait for paxos timeout!1")
return
}
}
}
func (sm *ShardMaster) applyOp(op Op) {
sm.Logf("Applying op to database")
switch op.Type {
case JoinOp:
sm.Logf("Join, you guys!")
sm.ApplyJoin(op.GID, op.Servers)
case LeaveOp:
sm.ApplyLeave(op.GID)
sm.Logf("Leave op applied!")
case MoveOp:
sm.ApplyMove(op.GID, op.Shard)
sm.Logf("Move op applied!")
case QueryOp:
//Do nothing
case Dummy:
//Do nothing
}
}
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
newConfig := sm.makeNewConfig()
newConfig.Shards[Shard] = GID
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
sm.activeGIDs[GID] = true
newConfig := sm.makeNewConfig()
newConfig.Groups[GID] = Servers
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyLeave(GID int64) {
delete(sm.activeGIDs, GID)
newConfig := sm.makeNewConfig()
delete(newConfig.Groups, GID)
for i, group := range newConfig.Shards {
if group == GID {
newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
}
}
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
continue
}
minGUID, minGroupSize = getMin(groupToShards)
maxGUID, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d (GUID: %d) ", minGroupSize, minGUID)
sm.Logf("max %d (GUID: %d) ", maxGroupSize, maxGUID)
maxCanGive := maxGroupSize - minShardsPerGroup
minNeeds := minShardsPerGroup - minGroupSize
shardsToMove := minNeeds
if maxCanGive < minNeeds {
shardsToMove = maxCanGive
}
sm.Logf("Moving %d shards: minNeeds %d, maxCanGive %d!", shardsToMove, minNeeds, maxCanGive)
for i := 0; i < shardsToMove; i++ {
moveshard := groupToShards[maxGUID][i]
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
groupToShards[maxGUID] = sliceDel(groupToShards[maxGUID], i)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
}
}
func getMax(groupToShards map[int64][]int) (GUID int64, nShards int) {
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) >= nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func getMin(groupToShards map[int64][]int) (GUID int64, nShards int) {
nShards = 2147483647 //Max signed int32 int is 32 or 64, so this will fit
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) < nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func sliceDel(a []int, i int) []int {
return append(a[:i], a[i+1:]...)
}
func sliceDelInt64(a []int64, i int) []int64 {
return append(a[:i], a[i+1:]...)
}
func (sm *ShardMaster) makeNewConfig() Config {
oldConfig := sm.configs[sm.lastConfig]
newConfig := Config{Groups: make(map[int64][]string)}
newConfig.Num = oldConfig.Num + 1
sm.lastConfig = newConfig.Num
newConfig.Shards = oldConfig.Shards //TODO: Does this work?
for k, v := range oldConfig.Groups {
newConfig.Groups[k] = v
}
return newConfig
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
op := Op{Type: JoinOp, OpID: rand.Int63(), GID: args.GID, Servers: args.Servers}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
op := Op{Type: LeaveOp, OpID: rand.Int63(), GID: args.GID}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
op := Op{Type: MoveOp, OpID: rand.Int63(), GID: args.GID, Shard: args.Shard}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error {
op := Op{Type: QueryOp, OpID: rand.Int63(), Num: args.Num}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
reply.Config = <-opReq.replyChan
sm.Logf("Got return!")
return nil
}
// please don't change these two functions.
func (sm *ShardMaster) Kill() {
atomic.StoreInt32(&sm.dead, 1)
sm.l.Close()
sm.px.Kill()
}
// call this to find out if the server is dead.
func (sm *ShardMaster) isdead() bool {
return atomic.LoadInt32(&sm.dead) != 0
}
// please do not change these two functions.
func (sm *ShardMaster) setunreliable(what bool) {
if what {
atomic.StoreInt32(&sm.unreliable, 1)
} else {
atomic.StoreInt32(&sm.unreliable, 0)
}
}
func (sm *ShardMaster) isunreliable() bool {
return atomic.LoadInt32(&sm.unreliable) != 0
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int64][]string{}
sm.activeGIDs = map[int64]bool{}
sm.opReqChan = make(chan OpReq)
rpcs := rpc.NewServer()
gob.Register(Op{})
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
go sm.sequentialApplier()
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.isdead() == false {
conn, err := sm.l.Accept()
if err == nil && sm.isdead() == false {
if sm.isunreliable() && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.isunreliable() && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.isdead() == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
return sm
}
| {
sm.px.Start(seq, dummyOp)
sm.waitForPaxos(seq)
sm.lastDummySeq = seq
} | conditional_block |
scripts_LA_2.py | import numpy as np
# from process_data import map_nodes_to_cities, map_links_to_cities
from process_data import process_links, extract_features, \
geojson_link, cities_to_js, process_net_attack
from scripts_LA import load_LA_3
from utils import modify_capacity, multiply_cognitive_cost
from frank_wolfe_heterogeneous import parametric_study_3
from metrics import average_cost_all_or_nothing, all_or_nothing_assignment, \
cost, save_metrics, average_cost, average_cost_subset, gas_emission
cities = ['Burbank',
'Glendale',
'La Canada Flintridge',
'Pasadena',
'South Pasadena',
'Alhambra',
'San Marino',
'San Gabriel',
'Temple City',
'Arcadia',
'Sierra Madre',
'Monrovia',
'Monterey Park',
'Rosemead',
'El Monte',
'South El Monte',
'Montebello',
'Pico Rivera',
'Irwindale',
'Baldwin Park',
'West Covina',
'Azusa',
'Covina',
'Duarte',
'Glendora']
def visualize_cities():
cities_to_js('data/cities.js', 'Los Angeles', 0, 1)
def visualize_links_by_city(city):
# visualize the links from a specific city
graph, demand, node, features = load_LA_3()
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
links = process_links(graph, node, features, in_order=True)
names = ['capacity', 'length', 'fftt']
color = 3 * (linkToCity[:, 1] == city)
color = color + 10 * (features[:, 0] > 900.)
weight = (features[:, 0] <= 900.) + 3. * (features[:, 0] > 900.)
geojson_link(links, names, color, weight)
def process_LA_net_attack(thres, beta):
process_net_attack('data/LA_net.txt',
'data/LA_net_attack.csv', thres, beta)
def load_LA_4():
graph = np.loadtxt('data/LA_net_attack.csv', delimiter=',', skiprows=1)
demand = np.loadtxt('data/LA_od_3.csv', delimiter=',', skiprows=1)
node = np.loadtxt('data/LA_node.csv', delimiter=',')
# features = table in the format [[capacity, length, FreeFlowTime]]
features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attack_2(alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
|
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(0.0, beta, f, net, d, features,
subset, out, 0,
length_unit=length_unit, time_unit=time_unit)
a = 1
b = 1 if alphas[-1] == 1.0 else 0
for i, alpha in enumerate(alphas[a:len(alphas) - b]):
print 'compute for nr = {}, r = {}'.format(1 - alpha, alpha)
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(alpha, beta, f, net, d, features, subset,
out, i + a, fs=fs,
length_unit=length_unit, time_unit=time_unit)
if alphas[-1] == 1.0:
alpha = 1.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[-1], alphas[-1])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(1.0, beta, f, net, d, features, subset,
out, -1, net2=net2,
length_unit=length_unit,
time_unit=time_unit)
colnames = 'ratio_routed,beta,tt_non_routed,tt_routed,tt,'
colnames = colnames + 'tt_local,tt_non_local,'
colnames = colnames + 'gas,gas_local,gas_non_local,'
colnames = colnames + 'vmt,vmt_local,vmt_non_local'
np.savetxt(output, out, delimiter=',',
header=colnames,
comments='')
def LA_metrics_attack_3(alphas, betas, input, output, thres):
save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=1,
length_unit='Meter', time_unit='Second')
def main():
# map_nodes_to_cities(
# cities, 'visualization/cities.js', 'data/LA_node.csv',
# 'data/LA/node_to_cities.csv')
# map_links_to_cities('data/LA/node_to_cities.csv', 'data/LA_net.csv', \
# 'data/LA/link_to_cities.csv')
# visualize_links_by_city('Glendale')
# visualize_cities()
# =================================Attack================================
# LA_metrics_attacks_city(0.5, 1000.,'Glendale')
LA_parametric_study_attack(.2, 1000., np.linspace(0.5, 1., 6))
# LA_metrics_attack_3(np.array([0.50]), np.array([0.90]),
# 'data/LA/test_attack_{}_{}.csv',
# 'data/LA/out_attack.csv', 1000.)
# LA_metrics_attack(np.linspace(0,1,11), 'data/LA/test_{}.csv',
# 'data/LA/out_attack.csv',1.0)
if __name__ == '__main__':
main()
| a = 60.0 | conditional_block |
scripts_LA_2.py | import numpy as np
# from process_data import map_nodes_to_cities, map_links_to_cities
from process_data import process_links, extract_features, \
geojson_link, cities_to_js, process_net_attack
from scripts_LA import load_LA_3
from utils import modify_capacity, multiply_cognitive_cost
from frank_wolfe_heterogeneous import parametric_study_3
from metrics import average_cost_all_or_nothing, all_or_nothing_assignment, \
cost, save_metrics, average_cost, average_cost_subset, gas_emission
cities = ['Burbank',
'Glendale',
'La Canada Flintridge',
'Pasadena',
'South Pasadena',
'Alhambra',
'San Marino',
'San Gabriel',
'Temple City',
'Arcadia',
'Sierra Madre',
'Monrovia',
'Monterey Park',
'Rosemead',
'El Monte',
'South El Monte',
'Montebello',
'Pico Rivera',
'Irwindale',
'Baldwin Park',
'West Covina',
'Azusa',
'Covina',
'Duarte',
'Glendora']
def visualize_cities():
cities_to_js('data/cities.js', 'Los Angeles', 0, 1)
def visualize_links_by_city(city):
# visualize the links from a specific city
graph, demand, node, features = load_LA_3()
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
links = process_links(graph, node, features, in_order=True)
names = ['capacity', 'length', 'fftt']
color = 3 * (linkToCity[:, 1] == city)
color = color + 10 * (features[:, 0] > 900.)
weight = (features[:, 0] <= 900.) + 3. * (features[:, 0] > 900.)
geojson_link(links, names, color, weight)
def process_LA_net_attack(thres, beta):
process_net_attack('data/LA_net.txt',
'data/LA_net_attack.csv', thres, beta)
def load_LA_4():
graph = np.loadtxt('data/LA_net_attack.csv', delimiter=',', skiprows=1)
demand = np.loadtxt('data/LA_od_3.csv', delimiter=',', skiprows=1)
node = np.loadtxt('data/LA_node.csv', delimiter=',')
# features = table in the format [[capacity, length, FreeFlowTime]]
features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attack_2(alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
|
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
a = 60.0
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(0.0, beta, f, net, d, features,
subset, out, 0,
length_unit=length_unit, time_unit=time_unit)
a = 1
b = 1 if alphas[-1] == 1.0 else 0
for i, alpha in enumerate(alphas[a:len(alphas) - b]):
print 'compute for nr = {}, r = {}'.format(1 - alpha, alpha)
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(alpha, beta, f, net, d, features, subset,
out, i + a, fs=fs,
length_unit=length_unit, time_unit=time_unit)
if alphas[-1] == 1.0:
alpha = 1.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[-1], alphas[-1])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(1.0, beta, f, net, d, features, subset,
out, -1, net2=net2,
length_unit=length_unit,
time_unit=time_unit)
colnames = 'ratio_routed,beta,tt_non_routed,tt_routed,tt,'
colnames = colnames + 'tt_local,tt_non_local,'
colnames = colnames + 'gas,gas_local,gas_non_local,'
colnames = colnames + 'vmt,vmt_local,vmt_non_local'
np.savetxt(output, out, delimiter=',',
header=colnames,
comments='')
def LA_metrics_attack_3(alphas, betas, input, output, thres):
save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=1,
length_unit='Meter', time_unit='Second')
def main():
# map_nodes_to_cities(
# cities, 'visualization/cities.js', 'data/LA_node.csv',
# 'data/LA/node_to_cities.csv')
# map_links_to_cities('data/LA/node_to_cities.csv', 'data/LA_net.csv', \
# 'data/LA/link_to_cities.csv')
# visualize_links_by_city('Glendale')
# visualize_cities()
# =================================Attack================================
# LA_metrics_attacks_city(0.5, 1000.,'Glendale')
LA_parametric_study_attack(.2, 1000., np.linspace(0.5, 1., 6))
# LA_metrics_attack_3(np.array([0.50]), np.array([0.90]),
# 'data/LA/test_attack_{}_{}.csv',
# 'data/LA/out_attack.csv', 1000.)
# LA_metrics_attack(np.linspace(0,1,11), 'data/LA/test_{}.csv',
# 'data/LA/out_attack.csv',1.0)
if __name__ == '__main__':
main()
| net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features | identifier_body |
scripts_LA_2.py | import numpy as np
# from process_data import map_nodes_to_cities, map_links_to_cities
from process_data import process_links, extract_features, \
geojson_link, cities_to_js, process_net_attack
from scripts_LA import load_LA_3
from utils import modify_capacity, multiply_cognitive_cost
from frank_wolfe_heterogeneous import parametric_study_3
from metrics import average_cost_all_or_nothing, all_or_nothing_assignment, \
cost, save_metrics, average_cost, average_cost_subset, gas_emission
cities = ['Burbank',
'Glendale',
'La Canada Flintridge',
'Pasadena',
'South Pasadena',
'Alhambra',
'San Marino',
'San Gabriel',
'Temple City',
'Arcadia',
'Sierra Madre',
'Monrovia',
'Monterey Park',
'Rosemead',
'El Monte',
'South El Monte',
'Montebello',
'Pico Rivera',
'Irwindale',
'Baldwin Park',
'West Covina',
'Azusa',
'Covina',
'Duarte',
'Glendora']
def visualize_cities():
cities_to_js('data/cities.js', 'Los Angeles', 0, 1)
def visualize_links_by_city(city):
# visualize the links from a specific city
graph, demand, node, features = load_LA_3()
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
links = process_links(graph, node, features, in_order=True)
names = ['capacity', 'length', 'fftt']
color = 3 * (linkToCity[:, 1] == city)
color = color + 10 * (features[:, 0] > 900.)
weight = (features[:, 0] <= 900.) + 3. * (features[:, 0] > 900.)
geojson_link(links, names, color, weight)
def process_LA_net_attack(thres, beta):
process_net_attack('data/LA_net.txt',
'data/LA_net_attack.csv', thres, beta)
def load_LA_4():
graph = np.loadtxt('data/LA_net_attack.csv', delimiter=',', skiprows=1)
demand = np.loadtxt('data/LA_od_3.csv', delimiter=',', skiprows=1)
node = np.loadtxt('data/LA_node.csv', delimiter=',')
# features = table in the format [[capacity, length, FreeFlowTime]]
features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def | (alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
a = 60.0
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(0.0, beta, f, net, d, features,
subset, out, 0,
length_unit=length_unit, time_unit=time_unit)
a = 1
b = 1 if alphas[-1] == 1.0 else 0
for i, alpha in enumerate(alphas[a:len(alphas) - b]):
print 'compute for nr = {}, r = {}'.format(1 - alpha, alpha)
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(alpha, beta, f, net, d, features, subset,
out, i + a, fs=fs,
length_unit=length_unit, time_unit=time_unit)
if alphas[-1] == 1.0:
alpha = 1.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[-1], alphas[-1])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(1.0, beta, f, net, d, features, subset,
out, -1, net2=net2,
length_unit=length_unit,
time_unit=time_unit)
colnames = 'ratio_routed,beta,tt_non_routed,tt_routed,tt,'
colnames = colnames + 'tt_local,tt_non_local,'
colnames = colnames + 'gas,gas_local,gas_non_local,'
colnames = colnames + 'vmt,vmt_local,vmt_non_local'
np.savetxt(output, out, delimiter=',',
header=colnames,
comments='')
def LA_metrics_attack_3(alphas, betas, input, output, thres):
save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=1,
length_unit='Meter', time_unit='Second')
def main():
# map_nodes_to_cities(
# cities, 'visualization/cities.js', 'data/LA_node.csv',
# 'data/LA/node_to_cities.csv')
# map_links_to_cities('data/LA/node_to_cities.csv', 'data/LA_net.csv', \
# 'data/LA/link_to_cities.csv')
# visualize_links_by_city('Glendale')
# visualize_cities()
# =================================Attack================================
# LA_metrics_attacks_city(0.5, 1000.,'Glendale')
LA_parametric_study_attack(.2, 1000., np.linspace(0.5, 1., 6))
# LA_metrics_attack_3(np.array([0.50]), np.array([0.90]),
# 'data/LA/test_attack_{}_{}.csv',
# 'data/LA/out_attack.csv', 1000.)
# LA_metrics_attack(np.linspace(0,1,11), 'data/LA/test_{}.csv',
# 'data/LA/out_attack.csv',1.0)
if __name__ == '__main__':
main()
| LA_metrics_attack_2 | identifier_name |
scripts_LA_2.py | import numpy as np
# from process_data import map_nodes_to_cities, map_links_to_cities
from process_data import process_links, extract_features, \
geojson_link, cities_to_js, process_net_attack
from scripts_LA import load_LA_3
from utils import modify_capacity, multiply_cognitive_cost
from frank_wolfe_heterogeneous import parametric_study_3
from metrics import average_cost_all_or_nothing, all_or_nothing_assignment, \
cost, save_metrics, average_cost, average_cost_subset, gas_emission
cities = ['Burbank',
'Glendale',
'La Canada Flintridge',
'Pasadena',
'South Pasadena',
'Alhambra',
'San Marino',
'San Gabriel',
'Temple City',
'Arcadia',
'Sierra Madre',
'Monrovia',
'Monterey Park',
'Rosemead',
'El Monte',
'South El Monte',
'Montebello',
'Pico Rivera',
'Irwindale',
'Baldwin Park',
'West Covina',
'Azusa',
'Covina',
'Duarte',
'Glendora']
def visualize_cities():
cities_to_js('data/cities.js', 'Los Angeles', 0, 1)
def visualize_links_by_city(city):
# visualize the links from a specific city
graph, demand, node, features = load_LA_3()
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
links = process_links(graph, node, features, in_order=True)
names = ['capacity', 'length', 'fftt']
color = 3 * (linkToCity[:, 1] == city)
color = color + 10 * (features[:, 0] > 900.)
weight = (features[:, 0] <= 900.) + 3. * (features[:, 0] > 900.)
geojson_link(links, names, color, weight)
def process_LA_net_attack(thres, beta):
process_net_attack('data/LA_net.txt',
'data/LA_net_attack.csv', thres, beta)
def load_LA_4():
graph = np.loadtxt('data/LA_net_attack.csv', delimiter=',', skiprows=1)
demand = np.loadtxt('data/LA_od_3.csv', delimiter=',', skiprows=1)
node = np.loadtxt('data/LA_node.csv', delimiter=',')
# features = table in the format [[capacity, length, FreeFlowTime]]
features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attack_2(alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
a = 60.0
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(0.0, beta, f, net, d, features,
subset, out, 0,
length_unit=length_unit, time_unit=time_unit)
a = 1
b = 1 if alphas[-1] == 1.0 else 0
for i, alpha in enumerate(alphas[a:len(alphas) - b]):
print 'compute for nr = {}, r = {}'.format(1 - alpha, alpha)
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(alpha, beta, f, net, d, features, subset,
out, i + a, fs=fs,
length_unit=length_unit, time_unit=time_unit)
if alphas[-1] == 1.0:
alpha = 1.0 | int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(1.0, beta, f, net, d, features, subset,
out, -1, net2=net2,
length_unit=length_unit,
time_unit=time_unit)
colnames = 'ratio_routed,beta,tt_non_routed,tt_routed,tt,'
colnames = colnames + 'tt_local,tt_non_local,'
colnames = colnames + 'gas,gas_local,gas_non_local,'
colnames = colnames + 'vmt,vmt_local,vmt_non_local'
np.savetxt(output, out, delimiter=',',
header=colnames,
comments='')
def LA_metrics_attack_3(alphas, betas, input, output, thres):
save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=1,
length_unit='Meter', time_unit='Second')
def main():
# map_nodes_to_cities(
# cities, 'visualization/cities.js', 'data/LA_node.csv',
# 'data/LA/node_to_cities.csv')
# map_links_to_cities('data/LA/node_to_cities.csv', 'data/LA_net.csv', \
# 'data/LA/link_to_cities.csv')
# visualize_links_by_city('Glendale')
# visualize_cities()
# =================================Attack================================
# LA_metrics_attacks_city(0.5, 1000.,'Glendale')
LA_parametric_study_attack(.2, 1000., np.linspace(0.5, 1., 6))
# LA_metrics_attack_3(np.array([0.50]), np.array([0.90]),
# 'data/LA/test_attack_{}_{}.csv',
# 'data/LA/out_attack.csv', 1000.)
# LA_metrics_attack(np.linspace(0,1,11), 'data/LA/test_{}.csv',
# 'data/LA/out_attack.csv',1.0)
if __name__ == '__main__':
main() | print 'compute for nr = {}, r = {}'.format(
1 - alphas[-1], alphas[-1])
fs = np.loadtxt(input.format(int(alpha * 100), | random_line_split |
align.rs | mod rustbio;
use std::{
ops::Range,
sync::{
atomic::{AtomicBool, AtomicU16, Ordering},
mpsc::{Sender, SyncSender},
Arc,
},
thread::available_parallelism,
};
use crate::{file::FileContent, view::AlignedMessage};
use bio::alignment::AlignmentOperation as Op;
use realfft::{num_complex::Complex64, RealFftPlanner, RealToComplex};
use serde::{Deserialize, Serialize};
use self::rustbio::{align_banded, RustBio};
pub const DEFAULT_BLOCKSIZE: usize = 8192;
pub const DEFAULT_KMER: usize = 8;
pub const DEFAULT_WINDOW: usize = 6;
/// An align mode, can be either Local for local alignment, global for global alignment,
/// or Blockwise with a given block size. The blockwise mode starts from a given position
/// and aligns only using `blocksize` bytes from each sequence in one direction, which
/// makes it works fast and local, but it doesn't see bigger gaps and everything after big gaps
/// tends to be unaligned.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum AlignMode {
Local,
Global,
Blockwise(usize),
}
#[derive(Clone, Copy, Debug)]
pub enum InternalMode {
Local,
Global,
Semiglobal,
}
impl From<AlignMode> for InternalMode {
fn from(value: AlignMode) -> Self {
match value {
AlignMode::Local => InternalMode::Local,
AlignMode::Global | AlignMode::Blockwise(_) => InternalMode::Global,
}
}
}
trait Align {
fn align(&self, algo: &AlignAlgorithm, mode: InternalMode, x: &[u8], y: &[u8]) -> Vec<Op>;
}
/// Determines whether to use the banded variant of the algorithm with given k-mer length
/// and window size
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Banded {
Normal,
Banded { kmer: usize, window: usize },
}
/// Contains parameters to run the alignment algorithm with
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct AlignAlgorithm {
pub gap_open: i32,
pub gap_extend: i32,
pub mismatch_score: i32,
pub match_score: i32,
pub mode: AlignMode,
pub band: Banded,
}
impl Default for AlignAlgorithm {
fn default() -> Self {
AlignAlgorithm {
gap_open: -5,
gap_extend: -1,
mismatch_score: -1,
match_score: 1,
mode: AlignMode::Blockwise(DEFAULT_BLOCKSIZE),
band: Banded::Normal,
}
}
}
impl AlignAlgorithm {
/// This function starts the threads for the alignment, which send the data over the sender.
/// It should then immediately return.
pub fn start_align(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
sender: Sender<AlignedMessage>,
) {
let algo = *self;
match self.mode {
AlignMode::Local => {
// we only need one thread
std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Local, sender));
}
AlignMode::Global => {
std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Global, sender));
}
AlignMode::Blockwise(blocksize) => {
// for Blockwise, we need one thread for each direction from the cursor
// Clone the data for the second thread here
let x_cp = x.clone();
let y_cp = y.clone();
let sender_cp = sender.clone();
std::thread::spawn(move || algo.align_end(x, y, addr, blocksize, sender));
std::thread::spawn(move || {
algo.align_front(x_cp, y_cp, addr, blocksize, sender_cp)
});
}
}
}
pub fn start_align_with_selection(
&self,
files: [FileContent; 2],
selection: [Option<Range<usize>>; 2],
addr: [usize; 2],
sender: Sender<AlignedMessage>,
) {
let (selected, right, end) = match selection.clone() {
[None, None] | [Some(_), Some(_)] => {
let [file0, file1] = files;
// if both or none are selected, just do the normal process
return self.start_align(file0, file1, (addr[0], addr[1]), sender);
}
[Some(x), None] | [None, Some(x)] => {
if x.is_empty() {
// selection is empty, does not really make sense to do glocal alignment
let [file0, file1] = files;
return self.start_align(file0, file1, (addr[0], addr[1]), sender);
}
let right = selection[1].is_some();
(
x.clone(),
selection[1].is_some(),
addr[right as usize] != x.start,
)
}
};
let algo = *self;
std::thread::spawn(move || {
algo.align_with_selection(files, (selected, right), end, sender)
});
}
fn align(&self, x: &[u8], y: &[u8], mode: InternalMode) -> Vec<Op> {
if x[..] == y[..] {
return vec![Op::Match; x.len()];
}
if self.band == Banded::Normal {
RustBio.align(self, mode, x, y)
} else {
align_banded(self, mode, x, y)
}
}
/// Aligns x to y as a whole
fn align_whole(
&self,
x: FileContent,
y: FileContent,
mode: InternalMode,
sender: Sender<AlignedMessage>,
) {
let alignment = self.align(&x, &y, mode);
let _ = sender.send(AlignedMessage::Append(
AlignElement::from_array(&alignment, &x, &y, 0, 0).0,
));
}
fn align_with_selection(
&self,
files: [FileContent; 2],
selection: (Range<usize>, bool),
end: bool,
sender: Sender<AlignedMessage>,
) {
let (select, right) = selection;
let full_pattern = &files[right as usize].clone();
let pattern = &files[right as usize].clone()[select.clone()];
let text = &files[(!right) as usize].clone()[..];
let alignment = self.align(pattern, text, InternalMode::Semiglobal);
let (alignment, textaddr) = ops_pattern_subrange(&alignment);
let (mut array, pattern_end, text_end) =
AlignElement::from_array(alignment, full_pattern, text, select.start, textaddr);
let (start_addr, end_addr) = if right {
array.iter_mut().for_each(|x| *x = x.mirror());
((textaddr, select.start), (text_end, pattern_end))
} else {
((select.start, textaddr), (pattern_end, text_end))
};
let (prepend, append) = if end {
let ap = array.pop().into_iter().collect();
(array, ap)
} else {
(Vec::new(), array)
};
if sender.send(AlignedMessage::Append(append)).is_err() {
return;
}
if sender.send(AlignedMessage::Prepend(prepend)).is_err() {
return;
}
let blocksize = if let AlignMode::Blockwise(s) = self.mode {
s
} else {
usize::MAX
};
let files2 = files.clone();
let sender2 = sender.clone();
let algo = *self;
std::thread::spawn(move || {
algo.align_end(
files2[0].clone(),
files2[1].clone(),
end_addr,
blocksize,
sender2,
);
});
self.align_front(
files[0].clone(),
files[1].clone(),
start_addr,
blocksize,
sender,
);
}
/// Blockwise alignment in the ascending address direction
pub fn align_end(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
// we want to have the beginning of our two arrays aligned at the same place
// since we start from a previous alignment or a cursor
while xaddr < x.len() && yaddr < y.len() {
// align at most block_size bytes from each sequence
let end_aligned = self.align(
&x[xaddr..(xaddr + block_size).min(x.len())],
&y[yaddr..(yaddr + block_size).min(y.len())],
self.mode.into(),
);
// we only actually append at most half of the block size since we make sure gaps crossing
// block boundaries are better detected
let ops = &end_aligned[0..end_aligned.len().min(block_size / 2)];
// we will not progress like this, so might as well quit
if ops.is_empty() {
break;
}
let (end, new_xaddr, new_yaddr) = AlignElement::from_array(ops, &x, &y, xaddr, yaddr);
if sender.send(AlignedMessage::Append(end)).is_err() {
return;
}
xaddr = new_xaddr;
yaddr = new_yaddr;
}
let clip = if x.len() == xaddr {
Op::Yclip(y.len() - yaddr)
} else if y.len() == yaddr {
Op::Xclip(x.len() - xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, xaddr, yaddr).0;
let _ = sender.send(AlignedMessage::Append(leftover));
}
/// Same as align_end, but in the other direction
pub fn align_front(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
while xaddr > 0 && yaddr > 0 {
let lower_xaddr = xaddr.saturating_sub(block_size);
let lower_yaddr = yaddr.saturating_sub(block_size);
let aligned = self.align(
&x[lower_xaddr..xaddr],
&y[lower_yaddr..yaddr],
self.mode.into(),
);
// unlike in align_end, we create the Alignelement from the whole array and then cut it
// in half. This is because the addresses returned from from_array are at the end, which
// we already know, so we instead take the start addresses from the array itself
let (end, _, _) = AlignElement::from_array(&aligned, &x, &y, lower_xaddr, lower_yaddr);
let real_end = Vec::from(&end[end.len().saturating_sub(block_size / 2)..end.len()]);
// if this is empty, we will not progress, so send the leftover out and quit after that
if real_end.is_empty() {
break;
}
let first = real_end.first().unwrap();
xaddr = first.xaddr;
yaddr = first.yaddr;
if sender.send(AlignedMessage::Prepend(real_end)).is_err() {
return;
}
}
let clip = if xaddr == 0 {
Op::Yclip(yaddr)
} else if yaddr == 0 {
Op::Xclip(xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, 0, 0).0;
let _ = sender.send(AlignedMessage::Prepend(leftover));
}
}
/// Representation of the alignment that saves the original addresses of the bytes.
/// This has some space overhead, but alignment is slow enough for that not to matter in most cases.
#[derive(Clone, Copy, Debug)]
pub struct AlignElement {
pub xaddr: usize,
pub xbyte: Option<u8>,
pub yaddr: usize,
pub ybyte: Option<u8>,
}
impl AlignElement {
/// mirrors the values
pub fn mirror(&self) -> AlignElement {
AlignElement {
xaddr: self.yaddr,
xbyte: self.ybyte,
yaddr: self.xaddr,
ybyte: self.xbyte,
}
}
/// Creates a vector out of `AlignElement`s from the operations outputted by rust-bio.
/// Also outputs the addresses at the end of the array.
fn from_array(
r: &[Op],
x: &[u8],
y: &[u8],
mut xaddr: usize,
mut yaddr: usize,
) -> (Vec<AlignElement>, usize, usize) {
let mut v = Vec::new();
for op in r {
match op {
Op::Match | Op::Subst => {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: Some(y[yaddr]),
});
xaddr += 1;
yaddr += 1;
}
Op::Ins => {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: None,
});
xaddr += 1;
}
Op::Del => {
v.push(AlignElement {
xaddr,
xbyte: None,
yaddr,
ybyte: Some(y[yaddr]),
});
yaddr += 1;
}
Op::Xclip(size) => {
v.extend((xaddr..xaddr + size).map(|s| AlignElement {
xaddr: s,
xbyte: Some(x[s]),
yaddr,
ybyte: None,
}));
xaddr += size
}
Op::Yclip(size) => {
v.extend((yaddr..yaddr + size).map(|s| AlignElement {
xaddr,
xbyte: None,
yaddr: s,
ybyte: Some(y[s]), | yaddr += size
}
}
}
(v, xaddr, yaddr)
}
}
fn ops_pattern_subrange(mut ops: &[Op]) -> (&[Op], usize) {
let mut ret_addr = 0;
if let [Op::Yclip(addr), rest @ ..] = ops {
ops = rest;
ret_addr += addr;
}
while let [Op::Del, rest @ ..] = ops {
ops = rest;
ret_addr += 1;
}
while let [rest @ .., Op::Del | Op::Yclip(_)] = ops {
ops = rest;
}
(ops, ret_addr)
}
pub enum FlatAlignProgressMessage {
Incomplete(u16),
Complete(isize),
}
pub struct FlatAlignmentContext {
is_running: Arc<AtomicBool>,
vecs: [FileContent; 2],
update_progress: Box<dyn FnMut(FlatAlignProgressMessage) + Send + 'static>,
}
impl FlatAlignmentContext {
pub fn new(
is_running: Arc<AtomicBool>,
vecs: [FileContent; 2],
update_progress: Box<dyn FnMut(FlatAlignProgressMessage) + Send + 'static>,
) -> Self {
Self {
is_running,
vecs,
update_progress,
}
}
// this finds the alignment between two arrays *without* removing elements such that
// fewest bytes are different (for the compvec)
pub fn align_flat(mut self) {
// this algorithm works by, for each byte:
// * making an indicator vector for both files indicating the addresses that have the given byte
// * cross-correlating them, which results in the number of matches of that byte value for each relative offset
// and then adding them all together to get the total number of matching bytes
let mut progress = 0u16;
let current_byte = Arc::new(AtomicU16::new(0));
let mut fft_planner = RealFftPlanner::new();
let total_len = self.vecs.iter().map(|x| x.len()).max().unwrap() * 2;
// the cross correlation is done using the omnipresent fft algorithm
let fft_forward = fft_planner.plan_fft_forward(total_len);
let fft_inverse = fft_planner.plan_fft_inverse(total_len);
let mut sum = fft_forward.make_output_vec();
// this is easily parallelizable for up to 256 threads, for which we span a thread pool
let thread_num = available_parallelism().map(usize::from).unwrap_or(1);
let (send, recv) = std::sync::mpsc::sync_channel::<Vec<Complex64>>(4.max(thread_num));
for _ in 0..thread_num {
let vecs = [self.vecs[0].clone(), self.vecs[1].clone()];
let inbyte = current_byte.clone();
let outvecs = send.clone();
let fft = fft_forward.clone();
std::thread::spawn(move || correlation_thread(vecs, inbyte, outvecs, fft));
}
for vec in recv.into_iter().take(256) {
if !self.is_running.load(Ordering::Relaxed) {
return;
}
// add the vectors together in the frequency domain
for (a, b) in sum.iter_mut().zip(vec.into_iter()) {
*a += b;
}
progress += 1;
(self.update_progress)(FlatAlignProgressMessage::Incomplete(progress));
}
// get the actual result in the time domain
let mut result = fft_inverse.make_output_vec();
fft_inverse
.process(&mut sum, &mut result)
.expect("Wrong lengths");
drop(sum);
// positive offset of the array with the highest value of overlap
let offset = result
.iter()
.enumerate()
.max_by(|a, b| {
a.1.partial_cmp(b.1).unwrap_or_else(|| {
if a.1.is_nan() {
std::cmp::Ordering::Less
} else {
std::cmp::Ordering::Greater
}
})
})
.unwrap_or((0, &0.0))
.0;
drop(result);
// reverse direction of result array
let offset = total_len - offset - 1;
// get the relative offset between the two vectors with optimal overlap
let relative_offset = if offset >= total_len / 2 {
offset as isize - total_len as isize
} else {
offset as isize
};
(self.update_progress)(FlatAlignProgressMessage::Complete(relative_offset))
}
}
fn correlation_thread(
vecs: [FileContent; 2],
inbyte: Arc<AtomicU16>,
outvecs: SyncSender<Vec<Complex64>>,
fft: Arc<dyn RealToComplex<f64>>,
) {
let len = fft.len();
loop {
// check if the next value in queue is still below 256
let byte: u8 = match inbyte.fetch_add(1, Ordering::Relaxed).try_into() {
Ok(f) => f,
Err(_) => return,
};
// cross-correlation using ffts
let mut first_out = fft.make_output_vec();
let mut first = fft.make_input_vec();
// one of the vectors is reversed because we want correlation, not convolution
for (i, x) in vecs[0].iter().enumerate() {
if *x == byte {
first[len - i - 1] = 1.0;
}
}
fft.process(&mut first, &mut first_out)
.expect("Wrong fft vector lengths");
// these vectors can be large, so drop them as soon as possible
drop(first);
let mut second = fft.make_input_vec();
for (i, x) in vecs[1].iter().enumerate() {
if *x == byte {
second[i] = 1.0
}
}
let mut second_out = fft.make_output_vec();
fft.process(&mut second, &mut second_out)
.expect("Wrong fft vector lengths");
drop(second);
for (a, b) in first_out.iter_mut().zip(second_out.iter()) {
*a *= b;
}
drop(second_out);
// note: we do not correlate fully, since we can add all the samples together
// in the frequency domain, saving nearly 1/3 of the processing time
if outvecs.send(first_out).is_err() {
return;
}
}
} | })); | random_line_split |
align.rs | mod rustbio;
use std::{
ops::Range,
sync::{
atomic::{AtomicBool, AtomicU16, Ordering},
mpsc::{Sender, SyncSender},
Arc,
},
thread::available_parallelism,
};
use crate::{file::FileContent, view::AlignedMessage};
use bio::alignment::AlignmentOperation as Op;
use realfft::{num_complex::Complex64, RealFftPlanner, RealToComplex};
use serde::{Deserialize, Serialize};
use self::rustbio::{align_banded, RustBio};
pub const DEFAULT_BLOCKSIZE: usize = 8192;
pub const DEFAULT_KMER: usize = 8;
pub const DEFAULT_WINDOW: usize = 6;
/// An align mode, can be either Local for local alignment, global for global alignment,
/// or Blockwise with a given block size. The blockwise mode starts from a given position
/// and aligns only using `blocksize` bytes from each sequence in one direction, which
/// makes it works fast and local, but it doesn't see bigger gaps and everything after big gaps
/// tends to be unaligned.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum AlignMode {
Local,
Global,
Blockwise(usize),
}
#[derive(Clone, Copy, Debug)]
pub enum | {
Local,
Global,
Semiglobal,
}
impl From<AlignMode> for InternalMode {
fn from(value: AlignMode) -> Self {
match value {
AlignMode::Local => InternalMode::Local,
AlignMode::Global | AlignMode::Blockwise(_) => InternalMode::Global,
}
}
}
trait Align {
fn align(&self, algo: &AlignAlgorithm, mode: InternalMode, x: &[u8], y: &[u8]) -> Vec<Op>;
}
/// Determines whether to use the banded variant of the algorithm with given k-mer length
/// and window size
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Banded {
Normal,
Banded { kmer: usize, window: usize },
}
/// Contains parameters to run the alignment algorithm with
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct AlignAlgorithm {
pub gap_open: i32,
pub gap_extend: i32,
pub mismatch_score: i32,
pub match_score: i32,
pub mode: AlignMode,
pub band: Banded,
}
impl Default for AlignAlgorithm {
fn default() -> Self {
AlignAlgorithm {
gap_open: -5,
gap_extend: -1,
mismatch_score: -1,
match_score: 1,
mode: AlignMode::Blockwise(DEFAULT_BLOCKSIZE),
band: Banded::Normal,
}
}
}
impl AlignAlgorithm {
/// This function starts the threads for the alignment, which send the data over the sender.
/// It should then immediately return.
pub fn start_align(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
sender: Sender<AlignedMessage>,
) {
let algo = *self;
match self.mode {
AlignMode::Local => {
// we only need one thread
std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Local, sender));
}
AlignMode::Global => {
std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Global, sender));
}
AlignMode::Blockwise(blocksize) => {
// for Blockwise, we need one thread for each direction from the cursor
// Clone the data for the second thread here
let x_cp = x.clone();
let y_cp = y.clone();
let sender_cp = sender.clone();
std::thread::spawn(move || algo.align_end(x, y, addr, blocksize, sender));
std::thread::spawn(move || {
algo.align_front(x_cp, y_cp, addr, blocksize, sender_cp)
});
}
}
}
pub fn start_align_with_selection(
&self,
files: [FileContent; 2],
selection: [Option<Range<usize>>; 2],
addr: [usize; 2],
sender: Sender<AlignedMessage>,
) {
let (selected, right, end) = match selection.clone() {
[None, None] | [Some(_), Some(_)] => {
let [file0, file1] = files;
// if both or none are selected, just do the normal process
return self.start_align(file0, file1, (addr[0], addr[1]), sender);
}
[Some(x), None] | [None, Some(x)] => {
if x.is_empty() {
// selection is empty, does not really make sense to do glocal alignment
let [file0, file1] = files;
return self.start_align(file0, file1, (addr[0], addr[1]), sender);
}
let right = selection[1].is_some();
(
x.clone(),
selection[1].is_some(),
addr[right as usize] != x.start,
)
}
};
let algo = *self;
std::thread::spawn(move || {
algo.align_with_selection(files, (selected, right), end, sender)
});
}
fn align(&self, x: &[u8], y: &[u8], mode: InternalMode) -> Vec<Op> {
if x[..] == y[..] {
return vec![Op::Match; x.len()];
}
if self.band == Banded::Normal {
RustBio.align(self, mode, x, y)
} else {
align_banded(self, mode, x, y)
}
}
/// Aligns x to y as a whole
fn align_whole(
&self,
x: FileContent,
y: FileContent,
mode: InternalMode,
sender: Sender<AlignedMessage>,
) {
let alignment = self.align(&x, &y, mode);
let _ = sender.send(AlignedMessage::Append(
AlignElement::from_array(&alignment, &x, &y, 0, 0).0,
));
}
fn align_with_selection(
&self,
files: [FileContent; 2],
selection: (Range<usize>, bool),
end: bool,
sender: Sender<AlignedMessage>,
) {
let (select, right) = selection;
let full_pattern = &files[right as usize].clone();
let pattern = &files[right as usize].clone()[select.clone()];
let text = &files[(!right) as usize].clone()[..];
let alignment = self.align(pattern, text, InternalMode::Semiglobal);
let (alignment, textaddr) = ops_pattern_subrange(&alignment);
let (mut array, pattern_end, text_end) =
AlignElement::from_array(alignment, full_pattern, text, select.start, textaddr);
let (start_addr, end_addr) = if right {
array.iter_mut().for_each(|x| *x = x.mirror());
((textaddr, select.start), (text_end, pattern_end))
} else {
((select.start, textaddr), (pattern_end, text_end))
};
let (prepend, append) = if end {
let ap = array.pop().into_iter().collect();
(array, ap)
} else {
(Vec::new(), array)
};
if sender.send(AlignedMessage::Append(append)).is_err() {
return;
}
if sender.send(AlignedMessage::Prepend(prepend)).is_err() {
return;
}
let blocksize = if let AlignMode::Blockwise(s) = self.mode {
s
} else {
usize::MAX
};
let files2 = files.clone();
let sender2 = sender.clone();
let algo = *self;
std::thread::spawn(move || {
algo.align_end(
files2[0].clone(),
files2[1].clone(),
end_addr,
blocksize,
sender2,
);
});
self.align_front(
files[0].clone(),
files[1].clone(),
start_addr,
blocksize,
sender,
);
}
/// Blockwise alignment in the ascending address direction
pub fn align_end(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
// we want to have the beginning of our two arrays aligned at the same place
// since we start from a previous alignment or a cursor
while xaddr < x.len() && yaddr < y.len() {
// align at most block_size bytes from each sequence
let end_aligned = self.align(
&x[xaddr..(xaddr + block_size).min(x.len())],
&y[yaddr..(yaddr + block_size).min(y.len())],
self.mode.into(),
);
// we only actually append at most half of the block size since we make sure gaps crossing
// block boundaries are better detected
let ops = &end_aligned[0..end_aligned.len().min(block_size / 2)];
// we will not progress like this, so might as well quit
if ops.is_empty() {
break;
}
let (end, new_xaddr, new_yaddr) = AlignElement::from_array(ops, &x, &y, xaddr, yaddr);
if sender.send(AlignedMessage::Append(end)).is_err() {
return;
}
xaddr = new_xaddr;
yaddr = new_yaddr;
}
let clip = if x.len() == xaddr {
Op::Yclip(y.len() - yaddr)
} else if y.len() == yaddr {
Op::Xclip(x.len() - xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, xaddr, yaddr).0;
let _ = sender.send(AlignedMessage::Append(leftover));
}
/// Same as align_end, but in the other direction
pub fn align_front(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
while xaddr > 0 && yaddr > 0 {
let lower_xaddr = xaddr.saturating_sub(block_size);
let lower_yaddr = yaddr.saturating_sub(block_size);
let aligned = self.align(
&x[lower_xaddr..xaddr],
&y[lower_yaddr..yaddr],
self.mode.into(),
);
// unlike in align_end, we create the Alignelement from the whole array and then cut it
// in half. This is because the addresses returned from from_array are at the end, which
// we already know, so we instead take the start addresses from the array itself
let (end, _, _) = AlignElement::from_array(&aligned, &x, &y, lower_xaddr, lower_yaddr);
let real_end = Vec::from(&end[end.len().saturating_sub(block_size / 2)..end.len()]);
// if this is empty, we will not progress, so send the leftover out and quit after that
if real_end.is_empty() {
break;
}
let first = real_end.first().unwrap();
xaddr = first.xaddr;
yaddr = first.yaddr;
if sender.send(AlignedMessage::Prepend(real_end)).is_err() {
return;
}
}
let clip = if xaddr == 0 {
Op::Yclip(yaddr)
} else if yaddr == 0 {
Op::Xclip(xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, 0, 0).0;
let _ = sender.send(AlignedMessage::Prepend(leftover));
}
}
/// Representation of the alignment that saves the original addresses of the bytes.
/// This has some space overhead, but alignment is slow enough for that not to matter in most cases.
#[derive(Clone, Copy, Debug)]
pub struct AlignElement {
    /// address of the byte in the first ("x") file
    pub xaddr: usize,
    /// byte value at `xaddr`; `None` marks a gap on the x side
    pub xbyte: Option<u8>,
    /// address of the byte in the second ("y") file
    pub yaddr: usize,
    /// byte value at `yaddr`; `None` marks a gap on the y side
    pub ybyte: Option<u8>,
}
impl AlignElement {
    /// Returns a copy with the x and y sides swapped.
    pub fn mirror(&self) -> AlignElement {
        let AlignElement {
            xaddr,
            xbyte,
            yaddr,
            ybyte,
        } = *self;
        AlignElement {
            xaddr: yaddr,
            xbyte: ybyte,
            yaddr: xaddr,
            ybyte: xbyte,
        }
    }
    /// Creates a vector of `AlignElement`s from the operations output by rust-bio,
    /// starting at the given x/y addresses. Also returns the addresses reached
    /// at the end of the array.
    fn from_array(
        r: &[Op],
        x: &[u8],
        y: &[u8],
        mut xaddr: usize,
        mut yaddr: usize,
    ) -> (Vec<AlignElement>, usize, usize) {
        let mut out = Vec::new();
        for op in r {
            match op {
                // both sides advance by one byte
                Op::Match | Op::Subst => {
                    out.push(AlignElement {
                        xaddr,
                        xbyte: Some(x[xaddr]),
                        yaddr,
                        ybyte: Some(y[yaddr]),
                    });
                    xaddr += 1;
                    yaddr += 1;
                }
                // a byte only on the x side (gap in y)
                Op::Ins => {
                    out.push(AlignElement {
                        xaddr,
                        xbyte: Some(x[xaddr]),
                        yaddr,
                        ybyte: None,
                    });
                    xaddr += 1;
                }
                // a byte only on the y side (gap in x)
                Op::Del => {
                    out.push(AlignElement {
                        xaddr,
                        xbyte: None,
                        yaddr,
                        ybyte: Some(y[yaddr]),
                    });
                    yaddr += 1;
                }
                // a run of x bytes with no counterpart in y
                Op::Xclip(size) => {
                    for s in xaddr..xaddr + size {
                        out.push(AlignElement {
                            xaddr: s,
                            xbyte: Some(x[s]),
                            yaddr,
                            ybyte: None,
                        });
                    }
                    xaddr += size
                }
                // a run of y bytes with no counterpart in x
                Op::Yclip(size) => {
                    for s in yaddr..yaddr + size {
                        out.push(AlignElement {
                            xaddr,
                            xbyte: None,
                            yaddr: s,
                            ybyte: Some(y[s]),
                        });
                    }
                    yaddr += size
                }
            }
        }
        (out, xaddr, yaddr)
    }
}
/// Trims leading and trailing operations that only consume the text ("y")
/// side, returning the remaining operations together with the text address
/// at which the pattern's alignment actually begins.
fn ops_pattern_subrange(ops: &[Op]) -> (&[Op], usize) {
    let mut start = 0;
    let mut ret_addr = 0;
    // a single leading Yclip skips that many text bytes
    if let Some(Op::Yclip(n)) = ops.first() {
        ret_addr += n;
        start = 1;
    }
    // each leading deletion consumes one more text byte
    while let Some(Op::Del) = ops.get(start) {
        ret_addr += 1;
        start += 1;
    }
    // trailing text-only operations carry no pattern information either
    let mut end = ops.len();
    while end > start && matches!(ops[end - 1], Op::Del | Op::Yclip(_)) {
        end -= 1;
    }
    (&ops[start..end], ret_addr)
}
/// Progress messages emitted by `FlatAlignmentContext::align_flat`.
pub enum FlatAlignProgressMessage {
    /// number of byte values (out of 256) processed so far
    Incomplete(u16),
    /// final relative offset between the two files with the best overlap
    Complete(isize),
}
/// State for computing the single best relative offset ("flat" alignment)
/// between two files, i.e. without inserting or removing any bytes.
pub struct FlatAlignmentContext {
    // checked during the run; when set to false the computation stops early
    is_running: Arc<AtomicBool>,
    // the two file contents to correlate
    vecs: [FileContent; 2],
    // callback receiving progress ticks and finally the computed offset
    update_progress: Box<dyn FnMut(FlatAlignProgressMessage) + Send + 'static>,
}
impl FlatAlignmentContext {
    /// Creates a new context over the two files; `update_progress` receives
    /// progress ticks and, at the end, the computed relative offset.
    pub fn new(
        is_running: Arc<AtomicBool>,
        vecs: [FileContent; 2],
        update_progress: Box<dyn FnMut(FlatAlignProgressMessage) + Send + 'static>,
    ) -> Self {
        Self {
            is_running,
            vecs,
            update_progress,
        }
    }
    // this finds the alignment between two arrays *without* removing elements such that
    // fewest bytes are different (for the compvec)
    pub fn align_flat(mut self) {
        // this algorithm works by, for each byte:
        // * making an indicator vector for both files indicating the addresses that have the given byte
        // * cross-correlating them, which results in the number of matches of that byte value for each relative offset
        // and then adding them all together to get the total number of matching bytes
        let mut progress = 0u16;
        // shared counter handing out byte values 0..=255 to the worker threads
        let current_byte = Arc::new(AtomicU16::new(0));
        let mut fft_planner = RealFftPlanner::new();
        // twice the longer file's length, so every relative shift of one file
        // against the other fits into the correlation output
        let total_len = self.vecs.iter().map(|x| x.len()).max().unwrap() * 2;
        // the cross correlation is done using the omnipresent fft algorithm
        let fft_forward = fft_planner.plan_fft_forward(total_len);
        let fft_inverse = fft_planner.plan_fft_inverse(total_len);
        // accumulator for the per-byte correlation results (frequency domain)
        let mut sum = fft_forward.make_output_vec();
        // this is easily parallelizable for up to 256 threads, for which we span a thread pool
        let thread_num = available_parallelism().map(usize::from).unwrap_or(1);
        let (send, recv) = std::sync::mpsc::sync_channel::<Vec<Complex64>>(4.max(thread_num));
        for _ in 0..thread_num {
            let vecs = [self.vecs[0].clone(), self.vecs[1].clone()];
            let inbyte = current_byte.clone();
            let outvecs = send.clone();
            let fft = fft_forward.clone();
            std::thread::spawn(move || correlation_thread(vecs, inbyte, outvecs, fft));
        }
        // the workers send exactly one vector per byte value, 256 in total
        for vec in recv.into_iter().take(256) {
            // bail out if the caller cancelled the computation
            if !self.is_running.load(Ordering::Relaxed) {
                return;
            }
            // add the vectors together in the frequency domain
            for (a, b) in sum.iter_mut().zip(vec.into_iter()) {
                *a += b;
            }
            progress += 1;
            (self.update_progress)(FlatAlignProgressMessage::Incomplete(progress));
        }
        // get the actual result in the time domain
        let mut result = fft_inverse.make_output_vec();
        fft_inverse
            .process(&mut sum, &mut result)
            .expect("Wrong lengths");
        drop(sum);
        // positive offset of the array with the highest value of overlap;
        // NaNs are ordered below everything so they can never win
        let offset = result
            .iter()
            .enumerate()
            .max_by(|a, b| {
                a.1.partial_cmp(b.1).unwrap_or_else(|| {
                    if a.1.is_nan() {
                        std::cmp::Ordering::Less
                    } else {
                        std::cmp::Ordering::Greater
                    }
                })
            })
            .unwrap_or((0, &0.0))
            .0;
        drop(result);
        // reverse direction of result array
        let offset = total_len - offset - 1;
        // get the relative offset between the two vectors with optimal overlap;
        // offsets past the midpoint wrap around and represent negative shifts
        let relative_offset = if offset >= total_len / 2 {
            offset as isize - total_len as isize
        } else {
            offset as isize
        };
        (self.update_progress)(FlatAlignProgressMessage::Complete(relative_offset))
    }
}
/// Worker loop for `align_flat`: repeatedly claims the next byte value
/// (0..=255) from the shared counter `inbyte`, builds 0/1 indicator vectors
/// for that byte in both files, multiplies their forward FFTs pointwise and
/// sends the frequency-domain product to `outvecs`. Returns when all byte
/// values are taken or the receiver hangs up.
fn correlation_thread(
    vecs: [FileContent; 2],
    inbyte: Arc<AtomicU16>,
    outvecs: SyncSender<Vec<Complex64>>,
    fft: Arc<dyn RealToComplex<f64>>,
) {
    let len = fft.len();
    loop {
        // check if the next value in queue is still below 256
        let byte: u8 = match inbyte.fetch_add(1, Ordering::Relaxed).try_into() {
            Ok(f) => f,
            // all 256 byte values have been handed out; this worker is done
            Err(_) => return,
        };
        // cross-correlation using ffts
        let mut first_out = fft.make_output_vec();
        let mut first = fft.make_input_vec();
        // one of the vectors is reversed because we want correlation, not convolution
        for (i, x) in vecs[0].iter().enumerate() {
            if *x == byte {
                first[len - i - 1] = 1.0;
            }
        }
        fft.process(&mut first, &mut first_out)
            .expect("Wrong fft vector lengths");
        // these vectors can be large, so drop them as soon as possible
        drop(first);
        let mut second = fft.make_input_vec();
        for (i, x) in vecs[1].iter().enumerate() {
            if *x == byte {
                second[i] = 1.0
            }
        }
        let mut second_out = fft.make_output_vec();
        fft.process(&mut second, &mut second_out)
            .expect("Wrong fft vector lengths");
        drop(second);
        // pointwise product in the frequency domain corresponds to
        // correlation in the time domain
        for (a, b) in first_out.iter_mut().zip(second_out.iter()) {
            *a *= b;
        }
        drop(second_out);
        // note: we do not correlate fully, since we can add all the samples together
        // in the frequency domain, saving nearly 1/3 of the processing time
        if outvecs.send(first_out).is_err() {
            return;
        }
    }
}
| InternalMode | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.