file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
pageserver.rs | //
// Main entry point for the Page Server executable
//
use log::*;
use pageserver::defaults::*;
use serde::{Deserialize, Serialize};
use std::{
env,
net::TcpListener,
path::{Path, PathBuf},
str::FromStr,
thread,
};
use zenith_utils::{auth::JwtAuth, logging, postgres_backend::AuthType};
use anyhow::{bail, ensure, Context, Result};
use clap::{App, Arg, ArgMatches};
use daemonize::Daemonize;
use pageserver::{
branches,
defaults::{DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_PG_LISTEN_ADDR},
http, page_service, tenant_mgr, PageServerConf, RelishStorageConfig, S3Config, LOG_FILE_NAME,
};
use zenith_utils::http::endpoint;
use const_format::formatcp;
/// String arguments that can be declared via CLI or config file
///
/// Every value is kept as an optional raw string so that CLI values and
/// `pageserver.toml` values can be merged with `CfgFileParams::or` before
/// being parsed into a typed `PageServerConf` by `try_into_config`.
#[derive(Serialize, Deserialize)]
struct CfgFileParams {
    listen_pg_addr: Option<String>,
    listen_http_addr: Option<String>,
    checkpoint_distance: Option<String>,
    checkpoint_period: Option<String>,
    gc_horizon: Option<String>,
    gc_period: Option<String>,
    pg_distrib_dir: Option<String>,
    auth_validation_public_key_path: Option<String>,
    auth_type: Option<String>,
    // see https://github.com/alexcrichton/toml-rs/blob/6c162e6562c3e432bf04c82a3d1d789d80761a86/examples/enum_external.rs for enum deserialisation examples
    relish_storage: Option<RelishStorage>,
}
/// External relish-storage backend, as declared on the CLI or in the config file.
#[derive(Serialize, Deserialize, Clone)]
enum RelishStorage {
    /// A local directory used as the external storage.
    Local {
        local_path: String,
    },
    /// An AWS S3 bucket used as the external storage.
    AwsS3 {
        bucket_name: String,
        bucket_region: String,
        // Credentials are accepted from the CLI but never written back to
        // the serialized config file.
        #[serde(skip_serializing)]
        access_key_id: Option<String>,
        #[serde(skip_serializing)]
        secret_access_key: Option<String>,
    },
}
impl CfgFileParams {
    /// Extract string arguments from CLI.
    fn from_args(arg_matches: &ArgMatches) -> Self {
        let get_arg = |arg_name: &str| -> Option<String> {
            arg_matches.value_of(arg_name).map(str::to_owned)
        };

        // Local-path storage and S3 storage are mutually exclusive on the CLI
        // (`conflicts_with_all` in `main`), so checking the local path first
        // cannot shadow S3 settings.
        let relish_storage = if let Some(local_path) = get_arg("relish-storage-local-path") {
            Some(RelishStorage::Local { local_path })
        } else if let Some((bucket_name, bucket_region)) =
            get_arg("relish-storage-s3-bucket").zip(get_arg("relish-storage-region"))
        {
            Some(RelishStorage::AwsS3 {
                bucket_name,
                bucket_region,
                access_key_id: get_arg("relish-storage-access-key"),
                secret_access_key: get_arg("relish-storage-secret-access-key"),
            })
        } else {
            None
        };

        Self {
            listen_pg_addr: get_arg("listen-pg"),
            listen_http_addr: get_arg("listen-http"),
            checkpoint_distance: get_arg("checkpoint_distance"),
            checkpoint_period: get_arg("checkpoint_period"),
            gc_horizon: get_arg("gc_horizon"),
            gc_period: get_arg("gc_period"),
            pg_distrib_dir: get_arg("postgres-distrib"),
            auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
            auth_type: get_arg("auth-type"),
            relish_storage,
        }
    }

    /// Fill missing values in `self` with `other`.
    ///
    /// Values already present in `self` win, so `cli_params.or(file_params)`
    /// gives CLI arguments precedence over the config file.
    fn or(self, other: CfgFileParams) -> Self {
        // TODO cleaner way to do this
        Self {
            listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
            listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
            checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
            checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
            gc_horizon: self.gc_horizon.or(other.gc_horizon),
            gc_period: self.gc_period.or(other.gc_period),
            pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
            auth_validation_public_key_path: self
                .auth_validation_public_key_path
                .or(other.auth_validation_public_key_path),
            auth_type: self.auth_type.or(other.auth_type),
            relish_storage: self.relish_storage.or(other.relish_storage),
        }
    }

    /// Create a `PageServerConf` from these string parameters.
    ///
    /// Missing values fall back to compiled-in defaults. Fails when a value
    /// cannot be parsed, when the postgres distribution is not found, or when
    /// JWT auth is enabled without an existing public key file.
    fn try_into_config(&self) -> Result<PageServerConf> {
        // The process chdirs into the workdir before this is called, so "."
        // is the correct working directory here.
        let workdir = PathBuf::from(".");

        let listen_pg_addr = match self.listen_pg_addr.as_ref() {
            Some(addr) => addr.clone(),
            None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
        };
        let listen_http_addr = match self.listen_http_addr.as_ref() {
            Some(addr) => addr.clone(),
            None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
        };
        let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
            Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
            None => DEFAULT_CHECKPOINT_DISTANCE,
        };
        let checkpoint_period = match self.checkpoint_period.as_ref() {
            Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
            None => DEFAULT_CHECKPOINT_PERIOD,
        };
        let gc_horizon: u64 = match self.gc_horizon.as_ref() {
            Some(horizon_str) => horizon_str.parse()?,
            None => DEFAULT_GC_HORIZON,
        };
        let gc_period = match self.gc_period.as_ref() {
            Some(period_str) => humantime::parse_duration(period_str)?,
            None => DEFAULT_GC_PERIOD,
        };
        let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
            Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
            None => env::current_dir()?.join("tmp_install"),
        };
        let auth_validation_public_key_path = self
            .auth_validation_public_key_path
            .as_ref()
            .map(PathBuf::from);
        let auth_type = self
            .auth_type
            .as_ref()
            .map_or(Ok(AuthType::Trust), |auth_type| {
                AuthType::from_str(auth_type)
            })?;

        if !pg_distrib_dir.join("bin/postgres").exists() {
            bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
        }

        if auth_type == AuthType::ZenithJWT {
            // `ensure!` accepts format arguments directly, so the failure
            // message is only built when the check actually fails (no eager
            // `format!` allocation), and the `is_some()`+`unwrap()` pair is
            // replaced with an explicit `match`.
            match auth_validation_public_key_path.as_ref() {
                None => bail!("Missing auth_validation_public_key_path when auth_type is ZenithJWT"),
                Some(path_ref) => ensure!(
                    path_ref.exists(),
                    "Can't find auth_validation_public_key at {:?}",
                    path_ref
                ),
            }
        }

        let relish_storage_config =
            self.relish_storage
                .as_ref()
                .map(|storage_params| match storage_params.clone() {
                    RelishStorage::Local { local_path } => {
                        RelishStorageConfig::LocalFs(PathBuf::from(local_path))
                    }
                    RelishStorage::AwsS3 {
                        bucket_name,
                        bucket_region,
                        access_key_id,
                        secret_access_key,
                    } => RelishStorageConfig::AwsS3(S3Config {
                        bucket_name,
                        bucket_region,
                        access_key_id,
                        secret_access_key,
                    }),
                });

        Ok(PageServerConf {
            daemonize: false, // overridden from the CLI flag in `main`
            listen_pg_addr,
            listen_http_addr,
            checkpoint_distance,
            checkpoint_period,
            gc_horizon,
            gc_period,
            superuser: String::from(DEFAULT_SUPERUSER),
            workdir,
            pg_distrib_dir,
            auth_validation_public_key_path,
            auth_type,
            relish_storage_config,
        })
    }
}
/// Entry point: parses CLI arguments, merges them with `pageserver.toml`
/// (unless `--init` is given), then either initializes the repository or
/// starts the page server.
fn main() -> Result<()> {
    let arg_matches = App::new("Zenith page server")
        .about("Materializes WAL stream to pages and serves them to the postgres")
        .arg(
            Arg::with_name("listen-pg")
                .short("l")
                .long("listen-pg")
                .alias("listen") // keep some compatibility
                .takes_value(true)
                .help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
        )
        .arg(
            Arg::with_name("listen-http")
                .long("listen-http")
                .alias("http_endpoint") // keep some compatibility
                .takes_value(true)
                .help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
        )
        .arg(
            Arg::with_name("daemonize")
                .short("d")
                .long("daemonize")
                .takes_value(false)
                .help("Run in the background"),
        )
        .arg(
            Arg::with_name("init")
                .long("init")
                .takes_value(false)
                .help("Initialize pageserver repo"),
        )
        .arg(
            Arg::with_name("checkpoint_distance")
                .long("checkpoint_distance")
                .takes_value(true)
                .help("Distance from current LSN to perform checkpoint of in-memory layers"),
        )
        .arg(
            Arg::with_name("checkpoint_period")
                .long("checkpoint_period")
                .takes_value(true)
                .help("Interval between checkpoint iterations"),
        )
        .arg(
            Arg::with_name("gc_horizon")
                .long("gc_horizon")
                .takes_value(true)
                .help("Distance from current LSN to perform all wal records cleanup"),
        )
        .arg(
            Arg::with_name("gc_period")
                .long("gc_period")
                .takes_value(true)
                .help("Interval between garbage collector iterations"),
        )
        .arg(
            Arg::with_name("workdir")
                .short("D")
                .long("workdir")
                .takes_value(true)
                .help("Working directory for the pageserver"),
        )
        .arg(
            Arg::with_name("postgres-distrib")
                .long("postgres-distrib")
                .takes_value(true)
                .help("Postgres distribution directory"),
        )
        .arg(
            Arg::with_name("create-tenant")
                .long("create-tenant")
                .takes_value(true)
                .help("Create tenant during init")
                .requires("init"),
        )
        .arg(
            Arg::with_name("auth-validation-public-key-path")
                .long("auth-validation-public-key-path")
                .takes_value(true)
                .help("Path to public key used to validate jwt signature"),
        )
        .arg(
            Arg::with_name("auth-type")
                .long("auth-type")
                .takes_value(true)
                .help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
        )
        // Relish-storage flags: a local directory and an S3 bucket are
        // mutually exclusive backends.
        .arg(
            Arg::with_name("relish-storage-local-path")
                .long("relish-storage-local-path")
                .takes_value(true)
                .help("Path to the local directory, to be used as an external relish storage")
                .conflicts_with_all(&[
                    "relish-storage-s3-bucket",
                    "relish-storage-region",
                    "relish-storage-access-key",
                    "relish-storage-secret-access-key",
                ]),
        )
        .arg(
            Arg::with_name("relish-storage-s3-bucket")
                .long("relish-storage-s3-bucket")
                .takes_value(true)
                .help("Name of the AWS S3 bucket to use an external relish storage")
                .requires("relish-storage-region"),
        )
        .arg(
            Arg::with_name("relish-storage-region")
                .long("relish-storage-region")
                .takes_value(true)
                .help("Region of the AWS S3 bucket"),
        )
        .arg(
            Arg::with_name("relish-storage-access-key")
                .long("relish-storage-access-key")
                .takes_value(true)
                .help("Credentials to access the AWS S3 bucket"),
        )
        .arg(
            Arg::with_name("relish-storage-secret-access-key")
                .long("relish-storage-secret-access-key")
                .takes_value(true)
                .help("Credentials to access the AWS S3 bucket"),
        )
        .get_matches();

    let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
    // The config file always lives at the root of the (canonicalized) workdir.
    let cfg_file_path = workdir
        .canonicalize()
        .with_context(|| format!("Error opening workdir '{}'", workdir.display()))?
        .join("pageserver.toml");

    let args_params = CfgFileParams::from_args(&arg_matches);

    let init = arg_matches.is_present("init");
    let create_tenant = arg_matches.value_of("create-tenant");

    let params = if init {
        // We're initializing the repo, so there's no config file yet
        args_params
    } else {
        // Supplement the CLI arguments with the config file
        let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)
            .with_context(|| format!("No pageserver config at '{}'", cfg_file_path.display()))?;
        let file_params: CfgFileParams = toml::from_str(&cfg_file_contents).with_context(|| {
            format!(
                "Failed to read '{}' as pageserver config",
                cfg_file_path.display()
            )
        })?;
        args_params.or(file_params)
    };

    // Set CWD to workdir for non-daemon modes
    env::set_current_dir(&workdir).with_context(|| {
        format!(
            "Failed to set application's current dir to '{}'",
            workdir.display()
        )
    })?;

    // Ensure the config is valid, even if just init-ing
    let mut conf = params.try_into_config().with_context(|| {
        format!(
            "Pageserver config at '{}' is not valid",
            cfg_file_path.display()
        )
    })?;
    conf.daemonize = arg_matches.is_present("daemonize");

    if init && conf.daemonize {
        bail!("--daemonize cannot be used with --init")
    }

    // The configuration is all set up now. Turn it into a 'static
    // that can be freely stored in structs and passed across threads
    // as a ref.
    let conf: &'static PageServerConf = Box::leak(Box::new(conf));

    // Create repo and exit if init was requested
    if init {
        branches::init_pageserver(conf, create_tenant).context("Failed to init pageserver")?;
        // write the config file
        let cfg_file_contents = toml::to_string_pretty(&params)
            .context("Failed to create pageserver config contents for initialisation")?;
        // TODO support enable-auth flag
        std::fs::write(&cfg_file_path, cfg_file_contents).with_context(|| {
            format!(
                "Failed to initialize pageserver config at '{}'",
                cfg_file_path.display()
            )
        })?;
        Ok(())
    } else {
        start_pageserver(conf).context("Failed to start pageserver")
    }
}
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
// Initialize logger
let (_scope_guard, log_file) = logging::init(LOG_FILE_NAME, conf.daemonize)?;
// TODO: Check that it looks like a valid repository before going further
// bind sockets before daemonizing so we report errors early and do not return until we are listening
info!(
"Starting pageserver http handler on {}",
conf.listen_http_addr
);
let http_listener = TcpListener::bind(conf.listen_http_addr.clone())?;
info!(
"Starting pageserver pg protocol handler on {}",
conf.listen_pg_addr
);
let pageserver_listener = TcpListener::bind(conf.listen_pg_addr.clone())?;
if conf.daemonize |
// Initialize tenant manager.
tenant_mgr::init(conf);
// keep join handles for spawned threads
let mut join_handles = vec![];
// initialize authentication for incoming connections
let auth = match &conf.auth_type {
AuthType::Trust | AuthType::MD5 => None,
AuthType::ZenithJWT => {
// unwrap is ok because check is performed when creating config, so path is set and file exists
let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
Some(JwtAuth::from_key_path(key_path)?.into())
}
};
info!("Using auth: {:#?}", conf.auth_type);
// Spawn a new thread for the http endpoint
// bind before launching separate thread so the error reported before startup exits
let cloned = auth.clone();
let http_endpoint_thread = thread::Builder::new()
.name("http_endpoint_thread".into())
.spawn(move || {
let router = http::make_router(conf, cloned);
endpoint::serve_thread_main(router, http_listener)
})?;
join_handles.push(http_endpoint_thread);
// Spawn a thread to listen for connections. It will spawn further threads
// for each connection.
let page_service_thread = thread::Builder::new()
.name("Page Service thread".into())
.spawn(move || {
page_service::thread_main(conf, auth, pageserver_listener, conf.auth_type)
})?;
join_handles.push(page_service_thread);
for handle in join_handles.into_iter() {
handle
.join()
.expect("thread panicked")
.expect("thread exited with an error")
}
Ok(())
}
| {
info!("daemonizing...");
// There shouldn't be any logging to stdin/stdout. Redirect it to the main log so
// that we will see any accidental manual fprintf's or backtraces.
let stdout = log_file.try_clone().unwrap();
let stderr = log_file;
let daemonize = Daemonize::new()
.pid_file("pageserver.pid")
.working_directory(".")
.stdout(stdout)
.stderr(stderr);
match daemonize.start() {
Ok(_) => info!("Success, daemonized"),
Err(e) => error!("Error, {}", e),
}
} | conditional_block |
pageserver.rs | //
// Main entry point for the Page Server executable
//
use log::*;
use pageserver::defaults::*;
use serde::{Deserialize, Serialize};
use std::{
env,
net::TcpListener,
path::{Path, PathBuf},
str::FromStr,
thread,
};
use zenith_utils::{auth::JwtAuth, logging, postgres_backend::AuthType};
use anyhow::{bail, ensure, Context, Result};
use clap::{App, Arg, ArgMatches};
use daemonize::Daemonize;
use pageserver::{
branches,
defaults::{DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_PG_LISTEN_ADDR},
http, page_service, tenant_mgr, PageServerConf, RelishStorageConfig, S3Config, LOG_FILE_NAME,
};
use zenith_utils::http::endpoint;
use const_format::formatcp;
/// String arguments that can be declared via CLI or config file
///
/// Every value is kept as an optional raw string so that CLI values and
/// `pageserver.toml` values can be merged with `CfgFileParams::or` before
/// being parsed into a typed `PageServerConf` by `try_into_config`.
#[derive(Serialize, Deserialize)]
struct CfgFileParams {
    listen_pg_addr: Option<String>,
    listen_http_addr: Option<String>,
    checkpoint_distance: Option<String>,
    checkpoint_period: Option<String>,
    gc_horizon: Option<String>,
    gc_period: Option<String>,
    pg_distrib_dir: Option<String>,
    auth_validation_public_key_path: Option<String>,
    auth_type: Option<String>,
    // see https://github.com/alexcrichton/toml-rs/blob/6c162e6562c3e432bf04c82a3d1d789d80761a86/examples/enum_external.rs for enum deserialisation examples
    relish_storage: Option<RelishStorage>,
}
/// External relish-storage backend, as declared on the CLI or in the config file.
#[derive(Serialize, Deserialize, Clone)]
enum RelishStorage {
    /// A local directory used as the external storage.
    Local {
        local_path: String,
    },
    /// An AWS S3 bucket used as the external storage.
    AwsS3 {
        bucket_name: String,
        bucket_region: String,
        // Credentials are accepted from the CLI but never written back to
        // the serialized config file.
        #[serde(skip_serializing)]
        access_key_id: Option<String>,
        #[serde(skip_serializing)]
        secret_access_key: Option<String>,
    },
}
impl CfgFileParams {
    /// Extract string arguments from CLI
    fn from_args(arg_matches: &ArgMatches) -> Self {
        let get_arg = |arg_name: &str| -> Option<String> {
            arg_matches.value_of(arg_name).map(str::to_owned)
        };

        // Local-path storage and S3 storage are mutually exclusive on the CLI
        // (`conflicts_with_all` in `main`), so checking the local path first
        // cannot shadow S3 settings.
        let relish_storage = if let Some(local_path) = get_arg("relish-storage-local-path") {
            Some(RelishStorage::Local { local_path })
        } else if let Some((bucket_name, bucket_region)) =
            get_arg("relish-storage-s3-bucket").zip(get_arg("relish-storage-region"))
        {
            Some(RelishStorage::AwsS3 {
                bucket_name,
                bucket_region,
                access_key_id: get_arg("relish-storage-access-key"),
                secret_access_key: get_arg("relish-storage-secret-access-key"),
            })
        } else {
            None
        };

        Self {
            listen_pg_addr: get_arg("listen-pg"),
            listen_http_addr: get_arg("listen-http"),
            checkpoint_distance: get_arg("checkpoint_distance"),
            checkpoint_period: get_arg("checkpoint_period"),
            gc_horizon: get_arg("gc_horizon"),
            gc_period: get_arg("gc_period"),
            pg_distrib_dir: get_arg("postgres-distrib"),
            auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
            auth_type: get_arg("auth-type"),
            relish_storage,
        }
    }

    /// Fill missing values in `self` with `other`
    ///
    /// Values already present in `self` win, so `cli_params.or(file_params)`
    /// gives CLI arguments precedence over the config file.
    fn or(self, other: CfgFileParams) -> Self {
        // TODO cleaner way to do this
        Self {
            listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
            listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
            checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
            checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
            gc_horizon: self.gc_horizon.or(other.gc_horizon),
            gc_period: self.gc_period.or(other.gc_period),
            pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
            auth_validation_public_key_path: self
                .auth_validation_public_key_path
                .or(other.auth_validation_public_key_path),
            auth_type: self.auth_type.or(other.auth_type),
            relish_storage: self.relish_storage.or(other.relish_storage),
        }
    }

    /// Create a PageServerConf from these string parameters
    ///
    /// Missing values fall back to compiled-in defaults. Fails when a value
    /// cannot be parsed, when the postgres distribution is not found, or when
    /// JWT auth is enabled without an existing public key file.
    fn try_into_config(&self) -> Result<PageServerConf> {
        // `main` chdirs into the workdir before calling this, so "." is the
        // correct working directory here.
        let workdir = PathBuf::from(".");

        let listen_pg_addr = match self.listen_pg_addr.as_ref() {
            Some(addr) => addr.clone(),
            None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
        };
        let listen_http_addr = match self.listen_http_addr.as_ref() {
            Some(addr) => addr.clone(),
            None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
        };
        let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
            Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
            None => DEFAULT_CHECKPOINT_DISTANCE,
        };
        let checkpoint_period = match self.checkpoint_period.as_ref() {
            Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
            None => DEFAULT_CHECKPOINT_PERIOD,
        };
        let gc_horizon: u64 = match self.gc_horizon.as_ref() {
            Some(horizon_str) => horizon_str.parse()?,
            None => DEFAULT_GC_HORIZON,
        };
        let gc_period = match self.gc_period.as_ref() {
            Some(period_str) => humantime::parse_duration(period_str)?,
            None => DEFAULT_GC_PERIOD,
        };
        let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
            Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
            None => env::current_dir()?.join("tmp_install"),
        };
        let auth_validation_public_key_path = self
            .auth_validation_public_key_path
            .as_ref()
            .map(PathBuf::from);
        let auth_type = self
            .auth_type
            .as_ref()
            .map_or(Ok(AuthType::Trust), |auth_type| {
                AuthType::from_str(auth_type)
            })?;

        if !pg_distrib_dir.join("bin/postgres").exists() {
            bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
        }

        if auth_type == AuthType::ZenithJWT {
            ensure!(
                auth_validation_public_key_path.is_some(),
                "Missing auth_validation_public_key_path when auth_type is ZenithJWT"
            );
            // unwrap is safe: the ensure! above guarantees the path is set.
            let path_ref = auth_validation_public_key_path.as_ref().unwrap();
            // NOTE(review): the `format!` here is redundant — `ensure!`
            // accepts format args directly and builds the message lazily.
            ensure!(
                path_ref.exists(),
                format!("Can't find auth_validation_public_key at {:?}", path_ref)
            );
        }

        let relish_storage_config =
            self.relish_storage
                .as_ref()
                .map(|storage_params| match storage_params.clone() {
                    RelishStorage::Local { local_path } => {
                        RelishStorageConfig::LocalFs(PathBuf::from(local_path))
                    }
                    RelishStorage::AwsS3 {
                        bucket_name,
                        bucket_region,
                        access_key_id,
                        secret_access_key,
                    } => RelishStorageConfig::AwsS3(S3Config {
                        bucket_name,
                        bucket_region,
                        access_key_id,
                        secret_access_key,
                    }),
                });

        Ok(PageServerConf {
            daemonize: false, // overridden from the CLI flag in `main`
            listen_pg_addr,
            listen_http_addr,
            checkpoint_distance,
            checkpoint_period,
            gc_horizon,
            gc_period,
            superuser: String::from(DEFAULT_SUPERUSER),
            workdir,
            pg_distrib_dir,
            auth_validation_public_key_path,
            auth_type,
            relish_storage_config,
        })
    }
}
/// Entry point: parses CLI arguments, merges them with `pageserver.toml`
/// (unless `--init` is given), then either initializes the repository or
/// starts the page server.
fn main() -> Result<()> {
    let arg_matches = App::new("Zenith page server")
        .about("Materializes WAL stream to pages and serves them to the postgres")
        .arg(
            Arg::with_name("listen-pg")
                .short("l")
                .long("listen-pg")
                .alias("listen") // keep some compatibility
                .takes_value(true)
                .help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
        )
        .arg(
            Arg::with_name("listen-http")
                .long("listen-http")
                .alias("http_endpoint") // keep some compatibility
                .takes_value(true)
                .help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
        )
        .arg(
            Arg::with_name("daemonize")
                .short("d")
                .long("daemonize")
                .takes_value(false)
                .help("Run in the background"),
        )
        .arg(
            Arg::with_name("init")
                .long("init")
                .takes_value(false)
                .help("Initialize pageserver repo"),
        )
        .arg(
            Arg::with_name("checkpoint_distance")
                .long("checkpoint_distance")
                .takes_value(true)
                .help("Distance from current LSN to perform checkpoint of in-memory layers"),
        )
        .arg(
            Arg::with_name("checkpoint_period")
                .long("checkpoint_period")
                .takes_value(true)
                .help("Interval between checkpoint iterations"),
        )
        .arg(
            Arg::with_name("gc_horizon")
                .long("gc_horizon")
                .takes_value(true)
                .help("Distance from current LSN to perform all wal records cleanup"),
        )
        .arg(
            Arg::with_name("gc_period")
                .long("gc_period")
                .takes_value(true)
                .help("Interval between garbage collector iterations"),
        )
        .arg(
            Arg::with_name("workdir")
                .short("D")
                .long("workdir")
                .takes_value(true)
                .help("Working directory for the pageserver"),
        )
        .arg(
            Arg::with_name("postgres-distrib")
                .long("postgres-distrib")
                .takes_value(true)
                .help("Postgres distribution directory"),
        )
        .arg(
            Arg::with_name("create-tenant")
                .long("create-tenant")
                .takes_value(true)
                .help("Create tenant during init")
                .requires("init"),
        )
        .arg(
            Arg::with_name("auth-validation-public-key-path")
                .long("auth-validation-public-key-path")
                .takes_value(true)
                .help("Path to public key used to validate jwt signature"),
        )
        .arg(
            Arg::with_name("auth-type")
                .long("auth-type")
                .takes_value(true)
                .help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
        )
        // Relish-storage flags: a local directory and an S3 bucket are
        // mutually exclusive backends.
        .arg(
            Arg::with_name("relish-storage-local-path")
                .long("relish-storage-local-path")
                .takes_value(true)
                .help("Path to the local directory, to be used as an external relish storage")
                .conflicts_with_all(&[
                    "relish-storage-s3-bucket",
                    "relish-storage-region",
                    "relish-storage-access-key",
                    "relish-storage-secret-access-key",
                ]),
        )
        .arg(
            Arg::with_name("relish-storage-s3-bucket")
                .long("relish-storage-s3-bucket")
                .takes_value(true)
                .help("Name of the AWS S3 bucket to use an external relish storage")
                .requires("relish-storage-region"),
        )
        .arg(
            Arg::with_name("relish-storage-region")
                .long("relish-storage-region")
                .takes_value(true)
                .help("Region of the AWS S3 bucket"),
        )
        .arg(
            Arg::with_name("relish-storage-access-key")
                .long("relish-storage-access-key")
                .takes_value(true)
                .help("Credentials to access the AWS S3 bucket"),
        )
        .arg(
            Arg::with_name("relish-storage-secret-access-key")
                .long("relish-storage-secret-access-key")
                .takes_value(true)
                .help("Credentials to access the AWS S3 bucket"),
        )
        .get_matches();

    let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
    // The config file always lives at the root of the (canonicalized) workdir.
    let cfg_file_path = workdir
        .canonicalize()
        .with_context(|| format!("Error opening workdir '{}'", workdir.display()))?
        .join("pageserver.toml");

    let args_params = CfgFileParams::from_args(&arg_matches);

    let init = arg_matches.is_present("init");
    let create_tenant = arg_matches.value_of("create-tenant");

    let params = if init {
        // We're initializing the repo, so there's no config file yet
        args_params
    } else {
        // Supplement the CLI arguments with the config file
        let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)
            .with_context(|| format!("No pageserver config at '{}'", cfg_file_path.display()))?;
        let file_params: CfgFileParams = toml::from_str(&cfg_file_contents).with_context(|| {
            format!(
                "Failed to read '{}' as pageserver config",
                cfg_file_path.display()
            )
        })?;
        args_params.or(file_params)
    };

    // Set CWD to workdir for non-daemon modes
    env::set_current_dir(&workdir).with_context(|| {
        format!(
            "Failed to set application's current dir to '{}'",
            workdir.display()
        )
    })?;

    // Ensure the config is valid, even if just init-ing
    let mut conf = params.try_into_config().with_context(|| {
        format!(
            "Pageserver config at '{}' is not valid",
            cfg_file_path.display()
        )
    })?;
    conf.daemonize = arg_matches.is_present("daemonize");

    if init && conf.daemonize {
        bail!("--daemonize cannot be used with --init")
    }

    // The configuration is all set up now. Turn it into a 'static
    // that can be freely stored in structs and passed across threads
    // as a ref.
    let conf: &'static PageServerConf = Box::leak(Box::new(conf));

    // Create repo and exit if init was requested
    if init {
        branches::init_pageserver(conf, create_tenant).context("Failed to init pageserver")?;
        // write the config file
        let cfg_file_contents = toml::to_string_pretty(&params)
            .context("Failed to create pageserver config contents for initialisation")?;
        // TODO support enable-auth flag
        std::fs::write(&cfg_file_path, cfg_file_contents).with_context(|| {
            format!(
                "Failed to initialize pageserver config at '{}'",
                cfg_file_path.display()
            )
        })?;
        Ok(())
    } else {
        start_pageserver(conf).context("Failed to start pageserver")
    }
}
/// Runs the page server: binds the HTTP and PG listeners, optionally
/// daemonizes, then serves until the worker threads exit.
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
    // Initialize logger
    let (_scope_guard, log_file) = logging::init(LOG_FILE_NAME, conf.daemonize)?;

    // TODO: Check that it looks like a valid repository before going further

    // bind sockets before daemonizing so we report errors early and do not return until we are listening
    info!(
        "Starting pageserver http handler on {}",
        conf.listen_http_addr
    );
    let http_listener = TcpListener::bind(conf.listen_http_addr.clone())?;
    info!(
        "Starting pageserver pg protocol handler on {}",
        conf.listen_pg_addr
    );
    let pageserver_listener = TcpListener::bind(conf.listen_pg_addr.clone())?;

    if conf.daemonize {
        info!("daemonizing...");

        // There shouldn't be any logging to stdin/stdout. Redirect it to the main log so
        // that we will see any accidental manual fprintf's or backtraces.
        let stdout = log_file.try_clone().unwrap();
        let stderr = log_file;
        let daemonize = Daemonize::new()
            .pid_file("pageserver.pid")
            .working_directory(".")
            .stdout(stdout)
            .stderr(stderr);

        match daemonize.start() {
            Ok(_) => info!("Success, daemonized"),
            // NOTE(review): a daemonization failure is only logged, not
            // propagated — the server then keeps running in the foreground.
            Err(e) => error!("Error, {}", e),
        }
    }

    // Initialize tenant manager.
    tenant_mgr::init(conf);

    // keep join handles for spawned threads
    let mut join_handles = vec![];

    // initialize authentication for incoming connections
    let auth = match &conf.auth_type {
        AuthType::Trust | AuthType::MD5 => None,
        AuthType::ZenithJWT => {
            // unwrap is ok because check is performed when creating config, so path is set and file exists
            let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
            Some(JwtAuth::from_key_path(key_path)?.into())
        }
    };
    info!("Using auth: {:#?}", conf.auth_type);

    // Spawn a new thread for the http endpoint
    // bind before launching separate thread so the error reported before startup exits
    let cloned = auth.clone();
    let http_endpoint_thread = thread::Builder::new()
        .name("http_endpoint_thread".into())
        .spawn(move || {
            let router = http::make_router(conf, cloned);
            endpoint::serve_thread_main(router, http_listener)
        })?;
    join_handles.push(http_endpoint_thread);

    // Spawn a thread to listen for connections. It will spawn further threads
    // for each connection.
    let page_service_thread = thread::Builder::new()
        .name("Page Service thread".into())
        .spawn(move || {
            page_service::thread_main(conf, auth, pageserver_listener, conf.auth_type)
        })?;
    join_handles.push(page_service_thread);

    // Block until every worker thread finishes, surfacing panics and errors.
    for handle in join_handles.into_iter() {
        handle
            .join()
            .expect("thread panicked")
            .expect("thread exited with an error")
    }

    Ok(())
}
executor.rs | //! Functions for setting configuration and executing the generator.
use cpp_to_rust_generator::common::errors::Result;
use cpp_to_rust_generator::common::{log, toml};
use cpp_to_rust_generator::common::file_utils::{PathBufWithAdded, repo_crate_local_path};
use cpp_to_rust_generator::config::{Config, CacheUsage, DebugLoggingConfig, exec};
use cpp_to_rust_generator::cpp_data::CppVisibility;
use cpp_to_rust_generator::common::cpp_build_config::{CppBuildConfigData, CppLibraryType};
use cpp_to_rust_generator::common::target;
use qt_generator_common::{get_installation_data, lib_folder_name, lib_dependencies};
use std::path::PathBuf;
use versions;
use doc_parser::DocParser;
use fix_header_names::fix_header_names;
use cpp_to_rust_generator::cpp_method::CppMethod;
use cpp_to_rust_generator::cpp_data::CppTypeKind;
use cpp_to_rust_generator::config::{CrateProperties, is_completed};
use doc_decoder::DocData;
use lib_configs;
/// Options passed to `exec_all`,
/// as in `cpp_to_rust_generator::config::Config`.
pub struct ExecConfig {
    /// If true, the generated Cargo.toml will reference dependencies by
    /// local path (see the status messages in `make_config`).
    pub write_dependencies_local_paths: bool,
    /// Controls how much cached data from previous runs may be reused; with
    /// `can_skip_all()`, completed libraries are skipped entirely.
    pub cache_usage: CacheUsage,
    /// Forwarded to `Config::set_write_cache`.
    pub write_cache: bool,
    /// Forwarded to `Config::set_debug_logging_config`.
    pub debug_logging_config: DebugLoggingConfig,
    /// If true, status logging is fully suppressed (no file, no stderr).
    pub quiet_mode: bool,
}
/// Executes generator for `libs` with given configuration.
///
/// The pseudo-library name "all" expands to every supported Qt module.
/// Each module `<name>` is processed into `<cache_dir>/qt_<name>` and
/// `<output_dir>/qt_<name>`; a module is only processed after all of its
/// dependencies have completed (in this run or a previous one).
pub fn exec_all(libs: Vec<String>,
                cache_dir: PathBuf,
                output_dir: PathBuf,
                config: ExecConfig)
                -> Result<()> {
    if config.quiet_mode {
        // Disable status output entirely: no log file and no stderr.
        let mut logger = log::default_logger();
        logger.set_category_settings(log::Status,
                                     log::LoggerSettings {
                                       file_path: None,
                                       write_to_stderr: false,
                                     });
    }

    // Per-library crate templates ship next to this crate's manifest.
    let crate_templates_path =
        PathBuf::from(env!("CARGO_MANIFEST_DIR")).with_added("crate_templates");

    let final_libs = if libs.iter().any(|x| x == "all") {
        vec!["core".to_string(),
             "gui".to_string(),
             "widgets".to_string(),
             "ui_tools".to_string(),
             "3d_core".to_string(),
             "3d_render".to_string(),
             "3d_input".to_string(),
             "3d_logic".to_string(),
             "3d_extras".to_string()]
    } else {
        libs
    };

    let mut configs: Vec<Config> = Vec::new();
    for sublib_name in final_libs {
        let lib_cache_dir = cache_dir.with_added(format!("qt_{}", sublib_name));
        let lib_crate_templates_path = crate_templates_path.with_added(&sublib_name);
        let lib_output_dir = output_dir.with_added(format!("qt_{}", sublib_name));

        // Every dependency must either be queued earlier in this run or have
        // a completed cache directory from a previous run.
        let mut dependency_paths = Vec::new();
        for dep in lib_dependencies(&sublib_name)? {
            let path = cache_dir.with_added(format!("qt_{}", dep));
            if !configs.iter().any(|c| c.cache_dir_path() == &path) && !is_completed(&path) {
                return Err(format!("\"{}\" depends on \"{}\" but processing \
                                    in \"{}\" directory is not completed.",
                                   sublib_name,
                                   dep,
                                   path.display())
                               .into());
            }
            dependency_paths.push(path);
        }

        if is_completed(&lib_cache_dir) && config.cache_usage.can_skip_all() {
            log::status("No processing! cpp_to_rust uses previous results.");
            log::status("Run with -C0 to force full processing.");
            continue;
        }

        configs.push(make_config(&sublib_name,
                                 lib_cache_dir,
                                 lib_output_dir,
                                 lib_crate_templates_path,
                                 dependency_paths,
                                 &config)?);
    }

    exec(configs.into_iter())?;
    Ok(())
}
/// Executes the generator for a single Qt module with given configuration.
fn make_config(sublib_name: &str,
cache_dir: PathBuf,
output_dir: PathBuf,
crate_templates_path: PathBuf,
dependency_paths: Vec<PathBuf>,
exec_config: &ExecConfig)
-> Result<Config> {
log::status(format!("Preparing generator config for library: {}", sublib_name));
let crate_name = format!("qt_{}", sublib_name);
let mut crate_properties = CrateProperties::new(crate_name.clone(),
versions::QT_OUTPUT_CRATES_VERSION);
let mut custom_fields = toml::value::Table::new();
let mut package_data = toml::value::Table::new();
package_data.insert("authors".to_string(),
toml::Value::Array(vec![toml::Value::String("Pavel Strakhov <ri@idzaaus.org>"
.to_string())]));
let description = format!("Bindings for {} C++ library (generated automatically with cpp_to_rust project)",
lib_folder_name(sublib_name));
package_data.insert("description".to_string(), toml::Value::String(description));
let doc_url = format!("https://rust-qt.github.io/rustdoc/qt/{}", &crate_name);
package_data.insert("documentation".to_string(), toml::Value::String(doc_url));
package_data.insert("repository".to_string(),
toml::Value::String("https://github.com/rust-qt/cpp_to_rust".to_string()));
package_data.insert("license".to_string(),
toml::Value::String("MIT".to_string()));
custom_fields.insert("package".to_string(), toml::Value::Table(package_data));
crate_properties.set_custom_fields(custom_fields);
crate_properties.remove_default_build_dependencies();
let qt_build_tools_path = if exec_config.write_dependencies_local_paths {
Some(repo_crate_local_path("qt_generator/qt_build_tools")?)
} else {
None
};
crate_properties.add_build_dependency("qt_build_tools",
versions::QT_BUILD_TOOLS_VERSION,
qt_build_tools_path);
let mut config = Config::new(&output_dir, &cache_dir, crate_properties);
let installation_data = get_installation_data(sublib_name)?;
config.add_include_path(&installation_data.root_include_path);
config.add_include_path(&installation_data.lib_include_path);
for dep in lib_dependencies(&sublib_name)? {
let dep_data = get_installation_data(dep)?;
config.add_include_path(&dep_data.lib_include_path);
}
config.add_target_include_path(&installation_data.lib_include_path);
config.set_cache_usage(exec_config.cache_usage.clone());
config.set_write_dependencies_local_paths(exec_config.write_dependencies_local_paths);
config.set_write_cache(exec_config.write_cache);
config.set_quiet_mode(exec_config.quiet_mode);
config.set_debug_logging_config(exec_config.debug_logging_config.clone());
config.set_cpp_lib_version(installation_data.qt_version.as_str());
if exec_config.write_dependencies_local_paths {
log::status("Output Cargo.toml file will contain local paths of used dependencies \
(use --no-local-paths to disable).");
} else {
log::status("Local paths will not be written to the output crate. Make sure all dependencies \
are published before trying to compile the crate.");
}
// TODO: does parsing work on MacOS without adding "-F"?
config.add_include_directive(&lib_folder_name(sublib_name));
let lib_include_path = installation_data.lib_include_path.clone();
config.add_cpp_data_filter(move |cpp_data| fix_header_names(cpp_data, &lib_include_path));
config.add_cpp_parser_arguments(vec!["-fPIC", "-fcxx-exceptions"]);
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-std=gnu++11");
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-fPIC");
// msvc and mingw don't need this
config
.cpp_build_config_mut()
.add(target::Condition::OS(target::OS::Windows).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.set_library_type(CppLibraryType::Shared);
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc), data);
}
if target::current_env() == target::Env::Msvc {
config.add_cpp_parser_argument("-std=c++14");
} else {
config.add_cpp_parser_argument("-std=gnu++11");
}
config.add_cpp_parser_blocked_name("qt_check_for_QGADGET_macro");
let sublib_name_clone = sublib_name.to_string();
let docs_path = installation_data.docs_path.clone();
config.add_cpp_data_filter(move |cpp_data| {
match DocData::new(&sublib_name_clone, &docs_path) {
Ok(doc_data) => {
let mut parser = DocParser::new(doc_data);
find_methods_docs(&mut cpp_data.methods, &mut parser)?; | type1.doc = Some(doc.0);
if let CppTypeKind::Enum { ref mut values } = type1.kind {
let enum_namespace = if let Some(index) = type1.name.rfind("::") {
type1.name[0..index + 2].to_string()
} else {
String::new()
};
for value in values {
if let Some(r) = doc.1.iter().find(|x| x.name == value.name) {
value.doc = Some(r.html.clone());
// let full_name = format!("{}::{}", enum_namespace, &value.name);
// println!("full name: {}", full_name);
parser.mark_enum_variant_used(&format!("{}{}", enum_namespace, &value.name));
} else {
let type_name = &type1.name;
log::llog(log::DebugQtDoc, || {
format!("Not found doc for enum variant: {}::{}",
type_name,
&value.name)
});
}
}
}
}
Err(err) => {
log::llog(log::DebugQtDoc,
|| format!("Not found doc for type: {}: {}", type1.name, err));
}
}
}
parser.report_unused_anchors();
}
Err(err) => {
log::error(format!("Failed to get Qt documentation: {}", err));
err.discard_expected();
}
}
Ok(())
});
config.set_crate_template_path(crate_templates_path);
match sublib_name {
"core" => lib_configs::core(&mut config)?,
"gui" => lib_configs::gui(&mut config)?,
"widgets" => lib_configs::widgets(&mut config)?,
"3d_core" => lib_configs::core_3d(&mut config)?,
"3d_render" => lib_configs::render_3d(&mut config)?,
"3d_input" => lib_configs::input_3d(&mut config)?,
"3d_logic" => lib_configs::logic_3d(&mut config)?,
"3d_extras" => lib_configs::extras_3d(&mut config)?,
"ui_tools" => {}
_ => return Err(format!("Unknown lib name: {}", sublib_name).into()),
}
config.set_dependency_cache_paths(dependency_paths);
Ok(config)
}
/// Adds documentation from `data` to `cpp_methods`.
fn find_methods_docs(cpp_methods: &mut [CppMethod], data: &mut DocParser) -> Result<()> {
for cpp_method in cpp_methods {
if let Some(ref info) = cpp_method.class_membership {
if info.visibility == CppVisibility::Private {
continue;
}
}
if let Some(ref declaration_code) = cpp_method.declaration_code {
match data.doc_for_method(&cpp_method.doc_id(),
declaration_code,
&cpp_method.short_text()) {
Ok(doc) => cpp_method.doc = Some(doc),
Err(msg) => {
if cpp_method.class_membership.is_some() &&
(&cpp_method.name == "tr" || &cpp_method.name == "trUtf8" ||
&cpp_method.name == "metaObject") {
// no error message
} else {
log::llog(log::DebugQtDoc, || {
format!("Failed to get documentation for method: {}: {}",
&cpp_method.short_text(),
msg)
});
}
}
}
}
}
Ok(())
} | for type1 in &mut cpp_data.types {
match parser.doc_for_type(&type1.name) {
Ok(doc) => {
// log::debug(format!("Found doc for type: {}", type1.name)); | random_line_split |
executor.rs | //! Functions for setting configuration and executing the generator.
use cpp_to_rust_generator::common::errors::Result;
use cpp_to_rust_generator::common::{log, toml};
use cpp_to_rust_generator::common::file_utils::{PathBufWithAdded, repo_crate_local_path};
use cpp_to_rust_generator::config::{Config, CacheUsage, DebugLoggingConfig, exec};
use cpp_to_rust_generator::cpp_data::CppVisibility;
use cpp_to_rust_generator::common::cpp_build_config::{CppBuildConfigData, CppLibraryType};
use cpp_to_rust_generator::common::target;
use qt_generator_common::{get_installation_data, lib_folder_name, lib_dependencies};
use std::path::PathBuf;
use versions;
use doc_parser::DocParser;
use fix_header_names::fix_header_names;
use cpp_to_rust_generator::cpp_method::CppMethod;
use cpp_to_rust_generator::cpp_data::CppTypeKind;
use cpp_to_rust_generator::config::{CrateProperties, is_completed};
use doc_decoder::DocData;
use lib_configs;
/// Options passed to `exec_all`,
/// as in `cpp_to_rust_generator::config::Config`.
pub struct ExecConfig {
pub write_dependencies_local_paths: bool,
pub cache_usage: CacheUsage,
pub write_cache: bool,
pub debug_logging_config: DebugLoggingConfig,
pub quiet_mode: bool,
}
/// Executes generator for `libs` with given configuration.
pub fn exec_all(libs: Vec<String>,
cache_dir: PathBuf,
output_dir: PathBuf,
config: ExecConfig)
-> Result<()> {
if config.quiet_mode {
let mut logger = log::default_logger();
logger.set_category_settings(log::Status,
log::LoggerSettings {
file_path: None,
write_to_stderr: false,
});
}
let crate_templates_path =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).with_added("crate_templates");
let final_libs = if libs.iter().any(|x| x == "all") {
vec!["core".to_string(),
"gui".to_string(),
"widgets".to_string(),
"ui_tools".to_string(),
"3d_core".to_string(),
"3d_render".to_string(),
"3d_input".to_string(),
"3d_logic".to_string(),
"3d_extras".to_string()]
} else {
libs
};
let mut configs: Vec<Config> = Vec::new();
for sublib_name in final_libs {
let lib_cache_dir = cache_dir.with_added(format!("qt_{}", sublib_name));
let lib_crate_templates_path = crate_templates_path.with_added(&sublib_name);
let lib_output_dir = output_dir.with_added(format!("qt_{}", sublib_name));
let mut dependency_paths = Vec::new();
for dep in lib_dependencies(&sublib_name)? {
let path = cache_dir.with_added(format!("qt_{}", dep));
if !configs.iter().any(|c| c.cache_dir_path() == &path) && !is_completed(&path) {
return Err(format!("\"{}\" depends on \"{}\" but processing \
in \"{}\" directory is not completed.",
sublib_name,
dep,
path.display())
.into());
}
dependency_paths.push(path);
}
if is_completed(&lib_cache_dir) && config.cache_usage.can_skip_all() {
log::status("No processing! cpp_to_rust uses previous results.");
log::status("Run with -C0 to force full processing.");
continue;
}
configs.push(make_config(&sublib_name,
lib_cache_dir,
lib_output_dir,
lib_crate_templates_path,
dependency_paths,
&config)?);
}
exec(configs.into_iter())?;
Ok(())
}
/// Executes the generator for a single Qt module with given configuration.
fn make_config(sublib_name: &str,
cache_dir: PathBuf,
output_dir: PathBuf,
crate_templates_path: PathBuf,
dependency_paths: Vec<PathBuf>,
exec_config: &ExecConfig)
-> Result<Config> {
log::status(format!("Preparing generator config for library: {}", sublib_name));
let crate_name = format!("qt_{}", sublib_name);
let mut crate_properties = CrateProperties::new(crate_name.clone(),
versions::QT_OUTPUT_CRATES_VERSION);
let mut custom_fields = toml::value::Table::new();
let mut package_data = toml::value::Table::new();
package_data.insert("authors".to_string(),
toml::Value::Array(vec![toml::Value::String("Pavel Strakhov <ri@idzaaus.org>"
.to_string())]));
let description = format!("Bindings for {} C++ library (generated automatically with cpp_to_rust project)",
lib_folder_name(sublib_name));
package_data.insert("description".to_string(), toml::Value::String(description));
let doc_url = format!("https://rust-qt.github.io/rustdoc/qt/{}", &crate_name);
package_data.insert("documentation".to_string(), toml::Value::String(doc_url));
package_data.insert("repository".to_string(),
toml::Value::String("https://github.com/rust-qt/cpp_to_rust".to_string()));
package_data.insert("license".to_string(),
toml::Value::String("MIT".to_string()));
custom_fields.insert("package".to_string(), toml::Value::Table(package_data));
crate_properties.set_custom_fields(custom_fields);
crate_properties.remove_default_build_dependencies();
let qt_build_tools_path = if exec_config.write_dependencies_local_paths {
Some(repo_crate_local_path("qt_generator/qt_build_tools")?)
} else {
None
};
crate_properties.add_build_dependency("qt_build_tools",
versions::QT_BUILD_TOOLS_VERSION,
qt_build_tools_path);
let mut config = Config::new(&output_dir, &cache_dir, crate_properties);
let installation_data = get_installation_data(sublib_name)?;
config.add_include_path(&installation_data.root_include_path);
config.add_include_path(&installation_data.lib_include_path);
for dep in lib_dependencies(&sublib_name)? {
let dep_data = get_installation_data(dep)?;
config.add_include_path(&dep_data.lib_include_path);
}
config.add_target_include_path(&installation_data.lib_include_path);
config.set_cache_usage(exec_config.cache_usage.clone());
config.set_write_dependencies_local_paths(exec_config.write_dependencies_local_paths);
config.set_write_cache(exec_config.write_cache);
config.set_quiet_mode(exec_config.quiet_mode);
config.set_debug_logging_config(exec_config.debug_logging_config.clone());
config.set_cpp_lib_version(installation_data.qt_version.as_str());
if exec_config.write_dependencies_local_paths {
log::status("Output Cargo.toml file will contain local paths of used dependencies \
(use --no-local-paths to disable).");
} else {
log::status("Local paths will not be written to the output crate. Make sure all dependencies \
are published before trying to compile the crate.");
}
// TODO: does parsing work on MacOS without adding "-F"?
config.add_include_directive(&lib_folder_name(sublib_name));
let lib_include_path = installation_data.lib_include_path.clone();
config.add_cpp_data_filter(move |cpp_data| fix_header_names(cpp_data, &lib_include_path));
config.add_cpp_parser_arguments(vec!["-fPIC", "-fcxx-exceptions"]);
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-std=gnu++11");
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-fPIC");
// msvc and mingw don't need this
config
.cpp_build_config_mut()
.add(target::Condition::OS(target::OS::Windows).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.set_library_type(CppLibraryType::Shared);
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc), data);
}
if target::current_env() == target::Env::Msvc {
config.add_cpp_parser_argument("-std=c++14");
} else {
config.add_cpp_parser_argument("-std=gnu++11");
}
config.add_cpp_parser_blocked_name("qt_check_for_QGADGET_macro");
let sublib_name_clone = sublib_name.to_string();
let docs_path = installation_data.docs_path.clone();
config.add_cpp_data_filter(move |cpp_data| {
match DocData::new(&sublib_name_clone, &docs_path) {
Ok(doc_data) => {
let mut parser = DocParser::new(doc_data);
find_methods_docs(&mut cpp_data.methods, &mut parser)?;
for type1 in &mut cpp_data.types {
match parser.doc_for_type(&type1.name) {
Ok(doc) => {
// log::debug(format!("Found doc for type: {}", type1.name));
type1.doc = Some(doc.0);
if let CppTypeKind::Enum { ref mut values } = type1.kind {
let enum_namespace = if let Some(index) = type1.name.rfind("::") {
type1.name[0..index + 2].to_string()
} else {
String::new()
};
for value in values {
if let Some(r) = doc.1.iter().find(|x| x.name == value.name) {
value.doc = Some(r.html.clone());
// let full_name = format!("{}::{}", enum_namespace, &value.name);
// println!("full name: {}", full_name);
parser.mark_enum_variant_used(&format!("{}{}", enum_namespace, &value.name));
} else {
let type_name = &type1.name;
log::llog(log::DebugQtDoc, || {
format!("Not found doc for enum variant: {}::{}",
type_name,
&value.name)
});
}
}
}
}
Err(err) => {
log::llog(log::DebugQtDoc,
|| format!("Not found doc for type: {}: {}", type1.name, err));
}
}
}
parser.report_unused_anchors();
}
Err(err) => {
log::error(format!("Failed to get Qt documentation: {}", err));
err.discard_expected();
}
}
Ok(())
});
config.set_crate_template_path(crate_templates_path);
match sublib_name {
"core" => lib_configs::core(&mut config)?,
"gui" => lib_configs::gui(&mut config)?,
"widgets" => lib_configs::widgets(&mut config)?,
"3d_core" => lib_configs::core_3d(&mut config)?,
"3d_render" => lib_configs::render_3d(&mut config)?,
"3d_input" => lib_configs::input_3d(&mut config)?,
"3d_logic" => lib_configs::logic_3d(&mut config)?,
"3d_extras" => lib_configs::extras_3d(&mut config)?,
"ui_tools" => {}
_ => return Err(format!("Unknown lib name: {}", sublib_name).into()),
}
config.set_dependency_cache_paths(dependency_paths);
Ok(config)
}
/// Adds documentation from `data` to `cpp_methods`.
fn | (cpp_methods: &mut [CppMethod], data: &mut DocParser) -> Result<()> {
for cpp_method in cpp_methods {
if let Some(ref info) = cpp_method.class_membership {
if info.visibility == CppVisibility::Private {
continue;
}
}
if let Some(ref declaration_code) = cpp_method.declaration_code {
match data.doc_for_method(&cpp_method.doc_id(),
declaration_code,
&cpp_method.short_text()) {
Ok(doc) => cpp_method.doc = Some(doc),
Err(msg) => {
if cpp_method.class_membership.is_some() &&
(&cpp_method.name == "tr" || &cpp_method.name == "trUtf8" ||
&cpp_method.name == "metaObject") {
// no error message
} else {
log::llog(log::DebugQtDoc, || {
format!("Failed to get documentation for method: {}: {}",
&cpp_method.short_text(),
msg)
});
}
}
}
}
}
Ok(())
}
| find_methods_docs | identifier_name |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
// the key idea, is that plaintext xored with previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
for z in 0..u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
}
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1, ...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, its very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, its very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> |
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
(0..u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn generate_password_reset_token() -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
plaintext.windows(b"user_id=".len()).position(|window| window == b"user_id=").is_some()
})
.is_some())
}
| {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
} | identifier_body |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
// the key idea, is that plaintext xored with previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
for z in 0..u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
}
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new(); | }
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1, ...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, its very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, its very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
(0..u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn generate_password_reset_token() -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
plaintext.windows(b"user_id=".len()).position(|window| window == b"user_id=").is_some()
})
.is_some())
} | for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]); | random_line_split |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
// the key idea, is that plaintext xored with previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
for z in 0..u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
}
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1, ...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, its very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, its very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
(0..u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn | () -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
plaintext.windows(b"user_id=".len()).position(|window| window == b"user_id=").is_some()
})
.is_some())
}
| generate_password_reset_token | identifier_name |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
// the key idea, is that plaintext xored with previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
for z in 0..u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) |
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1, ...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, its very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, its very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
(0..u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn generate_password_reset_token() -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
plaintext.windows(b"user_id=".len()).position(|window| window == b"user_id=").is_some()
})
.is_some())
}
| {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
} | conditional_block |
mm.rs | //! The virtual and physical memory manager for the kernel
use core::marker::PhantomData;
use core::mem::size_of;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::alloc::{Layout, GlobalAlloc};
use core::sync::atomic::{AtomicU64, AtomicPtr, Ordering};
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use crate::acpi::MAX_CORES;
use rangeset::RangeSet;
use boot_args::{KERNEL_PHYS_WINDOW_BASE, KERNEL_PHYS_WINDOW_SIZE};
use boot_args::KERNEL_VMEM_BASE;
use page_table::{PhysMem, PhysAddr, PageType, VirtAddr};
/// Table which is indexed by an APIC identifier to map to a physical range
/// which is local to it its NUMA node
static APIC_TO_MEMORY_RANGE: AtomicPtr<[Option<RangeSet>; MAX_CORES]> =
AtomicPtr::new(core::ptr::null_mut());
/// Get the preferred memory range for the currently running APIC. Returns
/// `None` if we have no valid APIC ID yet, or we do not have NUMA knowledge
/// of the current APIC ID
pub fn memory_range<'a>() -> Option<&'a RangeSet> {
// Check to see if the `APIC_TO_MEMORY_RANGE` has been initialized
let atmr = APIC_TO_MEMORY_RANGE.load(Ordering::SeqCst);
if atmr.is_null() {
return None;
}
// Cast the memory range structure to something we can access
let atmr = unsafe { &*atmr };
// Based on our current APIC ID look up the memory range
core!().apic_id().and_then(|x| atmr[x as usize].as_ref())
}
/// Establish the `APIC_TO_MEMORY_RANGE` global with the APIC IDs to their
/// corresponding NUMA-local memory regions
pub unsafe fn register_numa_nodes(apic_to_domain: BTreeMap<u32, u32>,
domain_to_mem: BTreeMap<u32, RangeSet>) {
// Create a heap-based database
let mut apic_mappings: Box<MaybeUninit<[Option<RangeSet>; MAX_CORES]>> =
Box::new_uninit();
// Initialize the heap based memory
for core in 0..MAX_CORES {
let foo = apic_mappings.as_mut_ptr() as *mut Option<RangeSet>;
core::ptr::write(foo.offset(core as isize), None);
}
// APIC mappings are now initialized
let mut apic_mappings = apic_mappings.assume_init();
// Go through each APIC to domain mapping
for (&apic, domain) in apic_to_domain.iter() {
apic_mappings[apic as usize] = domain_to_mem.get(domain)
.and_then(|&rs| {
Some(rs)
});
}
// Store the apic mapping database into the global!
APIC_TO_MEMORY_RANGE.store(Box::into_raw(apic_mappings), Ordering::SeqCst);
}
/// Find a free region of virtual memory that can hold `size` bytes and return
/// the virtual address
///
/// This is only valid for virtual requests for 4 KiB mappings
pub fn alloc_virt_addr_4k(size: u64) -> VirtAddr {
/// Base address for virtual allocations
static NEXT_FREE_VADDR: AtomicU64 = AtomicU64::new(KERNEL_VMEM_BASE);
/// Gap between virtual allocations
const GUARD_PAGE_SIZE: u64 = 32 * 1024;
assert!(size > 0 && (size & 0xfff) == 0,
"Invalid size for virtual region allocation");
// Compute the amount of virtual memory to reserve, including the guard
// size.
let reserve_size = GUARD_PAGE_SIZE.checked_add(size as u64)
.expect("Integer overflow on virtual region size");
// Get a new virtual region that is free
let ret = VirtAddr(
NEXT_FREE_VADDR.fetch_add(reserve_size, Ordering::SeqCst)
);
// If we cannot add the reserve size from the return value, then the
// virtual memory wrapped the 64-bit boundary
ret.0.checked_add(reserve_size)
.expect("Integer overflow on virtual address range");
ret
}
/// Gets access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys<'a>(paddr: PhysAddr, size: u64) -> &'a [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
/// Metadata on a freed allocation
#[repr(C)]
struct | {
/// Virtual address of the next `FreeListNode`
next: usize,
/// Number of free slots in `free_mem`
free_slots: usize,
/// Virtual addresses of free allocations
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes in a semi-linked list
/// table thingy.
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
/// Get a address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize;
} else {
// There's room in the current stack, just throw us in there
let fl = &mut *(self.head as *mut FreeListNode);
// Decrement the number of free slots
fl.free_slots -= 1;
// Store our newly freed virtual address into this slot
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize) =
vaddr;
}
}
}
}
/// A wrapper on a range set to allow implementing the `PhysMem` trait
pub struct PhysicalMemory;
impl PhysMem for PhysicalMemory {
unsafe fn translate(&mut self, paddr: PhysAddr, size: usize)
-> Option<*const u8> {
self.translate_mut(paddr, size).map(|x| x as *const u8)
}
unsafe fn translate_mut(&mut self, paddr: PhysAddr, size: usize)
-> Option<*mut u8> {
// Compute the ending physical address
let end = (size as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
})?;
// Make sure this physical address fits inside our window
if end >= KERNEL_PHYS_WINDOW_SIZE {
return None;
}
// Convert the physical address into linear mapping view address
Some((paddr.0 + KERNEL_PHYS_WINDOW_BASE) as *mut u8)
}
fn alloc_phys(&mut self, layout: Layout) -> Option<PhysAddr> {
if layout.size() <= 4096 && layout.align() <= layout.size() {
// Special case, just allocate directly from our free lists. Our
// free lists for allocations <= 4096 bytes directly map to the
// physical memory map, and are naturally aligned
unsafe {
let ptr = core!().free_list(layout).lock().pop();
Some(PhysAddr(ptr as u64 - KERNEL_PHYS_WINDOW_BASE))
}
} else {
// Get access to physical memory
let mut phys_mem = unsafe {
core!().boot_args.free_memory_ref().lock()
};
let phys_mem = phys_mem.as_mut()?;
// Could not satisfy allocation from free list, allocate
// directly from the physical memory pool
let alc = phys_mem.allocate_prefer(layout.size() as u64,
layout.align() as u64,
memory_range())?;
// Update stats
GLOBAL_ALLOCATOR.free_physical
.store(phys_mem.sum().unwrap(), Ordering::Relaxed);
Some(PhysAddr(alc as u64))
}
}
}
/// The global allocator for the bootloader, this just uses physical memory as
/// a backing and does not handle any fancy things like fragmentation. Use this
/// carefully.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator {
num_allocs: AtomicU64::new(0),
num_frees: AtomicU64::new(0),
free_physical: AtomicU64::new(0),
free_list: AtomicU64::new(0),
};
/// Empty structure that we can implement `GlobalAlloc` for such that we can
/// use the `#[global_allocator]`
#[derive(Debug)]
pub struct GlobalAllocator {
/// Number of allocations performed
pub num_allocs: AtomicU64,
/// Number of frees performed
pub num_frees: AtomicU64,
/// Current number of free bytes in the physical memory pool, this only
/// ever decreases since we do not free back to physical memory
pub free_physical: AtomicU64,
/// Number of bytes sitting in free lists
pub free_list: AtomicU64,
}
/// Print the allocation statistics to the screen
pub fn print_alloc_stats() {
// Get total amount of physical memory
let total_phys = core!().boot_args
.total_physical_memory.load(Ordering::Relaxed);
// Get physical memory in use
let phys_inuse =
total_phys - GLOBAL_ALLOCATOR.free_physical.load(Ordering::Relaxed);
print!("Allocs {:8} | Frees {:8} | Physical {:10.2} MiB / {:10.2} MiB | \
Free List {:10.2} MiB\n",
GLOBAL_ALLOCATOR.num_allocs.load(Ordering::Relaxed),
GLOBAL_ALLOCATOR.num_frees.load(Ordering::Relaxed),
phys_inuse as f64 / 1024. / 1024.,
total_phys as f64 / 1024. / 1024.,
GLOBAL_ALLOCATOR.free_list
.load(Ordering::Relaxed) as f64 / 1024. / 1024.);
}
unsafe impl GlobalAlloc for GlobalAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Allocate memory from our free lists
let ptr = core!().free_list(layout).lock().pop();
// Update stats
self.num_allocs.fetch_add(1, Ordering::Relaxed);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// Free the memory
core!().free_list(layout).lock().push(ptr);
// Update stats
self.num_frees.fetch_add(1, Ordering::Relaxed);
}
}
/// Allocation containing a physically contiguous allocation
pub struct PhysContig<T> {
/// Virtual address of the allocation
vaddr: VirtAddr,
/// Physical address of the allocation
paddr: PhysAddr,
/// Mark that this "holds" a `T`
_phantom: PhantomData<T>,
}
impl<T> PhysContig<T> {
/// Allocate physically contiguous memory large enough to hold `val` and
/// move `val` into it
pub fn new(val: T) -> PhysContig<T> {
assert!(size_of::<T>() > 0, "Cannot use ZST for PhysContig");
assert!(size_of::<T>() <= 4096, "Size too large for PhysContig");
unsafe {
// Allocate a 4 KiB page
let alloc = GLOBAL_ALLOCATOR.alloc(
Layout::from_size_align(4096, 4096).unwrap());
// Compute the physical address of this allocation
let paddr = PhysAddr(alloc as u64 - KERNEL_PHYS_WINDOW_BASE);
// Initialize the memory to `val`
core::ptr::write(alloc as *mut T, val);
// Create the `PhysContig` structure
PhysContig {
vaddr: VirtAddr(alloc as u64),
paddr: paddr,
_phantom: PhantomData,
}
}
}
/// Get the physical address of the allocation
pub fn phys_addr(&self) -> PhysAddr {
self.paddr
}
}
impl<T> Drop for PhysContig<T> {
fn drop(&mut self) {
unsafe {
GLOBAL_ALLOCATOR.dealloc(self.vaddr.0 as *mut u8,
Layout::from_size_align(4096, 4096).unwrap());
}
}
}
impl<T> Deref for PhysContig<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe {
&*(self.vaddr.0 as *const T)
}
}
}
impl<T> DerefMut for PhysContig<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe {
&mut *(self.vaddr.0 as *mut T)
}
}
}
/// Out-of-memory handler, we just panic
#[alloc_error_handler]
fn alloc_error(_layout: Layout) -> ! {
panic!("Out of memory");
}
| FreeListNode | identifier_name |
mm.rs | //! The virtual and physical memory manager for the kernel
use core::marker::PhantomData;
use core::mem::size_of;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::alloc::{Layout, GlobalAlloc};
use core::sync::atomic::{AtomicU64, AtomicPtr, Ordering};
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use crate::acpi::MAX_CORES;
use rangeset::RangeSet;
use boot_args::{KERNEL_PHYS_WINDOW_BASE, KERNEL_PHYS_WINDOW_SIZE};
use boot_args::KERNEL_VMEM_BASE;
use page_table::{PhysMem, PhysAddr, PageType, VirtAddr};
/// Table which is indexed by an APIC identifier to map to a physical range
/// which is local to it its NUMA node
static APIC_TO_MEMORY_RANGE: AtomicPtr<[Option<RangeSet>; MAX_CORES]> =
AtomicPtr::new(core::ptr::null_mut());
/// Get the preferred memory range for the currently running APIC. Returns
/// `None` if we have no valid APIC ID yet, or we do not have NUMA knowledge
/// of the current APIC ID
pub fn memory_range<'a>() -> Option<&'a RangeSet> {
// Check to see if the `APIC_TO_MEMORY_RANGE` has been initialized
let atmr = APIC_TO_MEMORY_RANGE.load(Ordering::SeqCst);
if atmr.is_null() {
return None;
}
// Cast the memory range structure to something we can access
let atmr = unsafe { &*atmr };
// Based on our current APIC ID look up the memory range
core!().apic_id().and_then(|x| atmr[x as usize].as_ref())
}
/// Establish the `APIC_TO_MEMORY_RANGE` global with the APIC IDs to their
/// corresponding NUMA-local memory regions
pub unsafe fn register_numa_nodes(apic_to_domain: BTreeMap<u32, u32>,
domain_to_mem: BTreeMap<u32, RangeSet>) {
// Create a heap-based database
let mut apic_mappings: Box<MaybeUninit<[Option<RangeSet>; MAX_CORES]>> =
Box::new_uninit();
// Initialize the heap based memory
for core in 0..MAX_CORES {
let foo = apic_mappings.as_mut_ptr() as *mut Option<RangeSet>;
core::ptr::write(foo.offset(core as isize), None);
}
// APIC mappings are now initialized
let mut apic_mappings = apic_mappings.assume_init();
// Go through each APIC to domain mapping
for (&apic, domain) in apic_to_domain.iter() {
apic_mappings[apic as usize] = domain_to_mem.get(domain)
.and_then(|&rs| {
Some(rs)
});
}
// Store the apic mapping database into the global!
APIC_TO_MEMORY_RANGE.store(Box::into_raw(apic_mappings), Ordering::SeqCst);
}
/// Find a free region of virtual memory that can hold `size` bytes and return
/// the virtual address
///
/// This is only valid for virtual requests for 4 KiB mappings
pub fn alloc_virt_addr_4k(size: u64) -> VirtAddr {
/// Base address for virtual allocations
static NEXT_FREE_VADDR: AtomicU64 = AtomicU64::new(KERNEL_VMEM_BASE);
/// Gap between virtual allocations
const GUARD_PAGE_SIZE: u64 = 32 * 1024;
assert!(size > 0 && (size & 0xfff) == 0,
"Invalid size for virtual region allocation");
// Compute the amount of virtual memory to reserve, including the guard
// size.
let reserve_size = GUARD_PAGE_SIZE.checked_add(size as u64)
.expect("Integer overflow on virtual region size");
// Get a new virtual region that is free
let ret = VirtAddr(
NEXT_FREE_VADDR.fetch_add(reserve_size, Ordering::SeqCst)
);
// If we cannot add the reserve size from the return value, then the
// virtual memory wrapped the 64-bit boundary
ret.0.checked_add(reserve_size)
.expect("Integer overflow on virtual address range");
ret
}
/// Gets access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys<'a>(paddr: PhysAddr, size: u64) -> &'a [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
/// Metadata on a freed allocation
#[repr(C)]
struct FreeListNode {
/// Virtual address of the next `FreeListNode`
next: usize,
/// Number of free slots in `free_mem`
free_slots: usize,
/// Virtual addresses of free allocations
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes in a semi-linked list
/// table thingy.
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
/// Get a address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize;
} else {
// There's room in the current stack, just throw us in there
let fl = &mut *(self.head as *mut FreeListNode);
// Decrement the number of free slots
fl.free_slots -= 1;
// Store our newly freed virtual address into this slot
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize) =
vaddr;
}
}
}
}
/// A wrapper on a range set to allow implementing the `PhysMem` trait
pub struct PhysicalMemory;
impl PhysMem for PhysicalMemory {
unsafe fn translate(&mut self, paddr: PhysAddr, size: usize)
-> Option<*const u8> {
self.translate_mut(paddr, size).map(|x| x as *const u8)
}
unsafe fn translate_mut(&mut self, paddr: PhysAddr, size: usize)
-> Option<*mut u8> {
// Compute the ending physical address
let end = (size as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
})?;
// Make sure this physical address fits inside our window
if end >= KERNEL_PHYS_WINDOW_SIZE {
return None;
}
// Convert the physical address into linear mapping view address
Some((paddr.0 + KERNEL_PHYS_WINDOW_BASE) as *mut u8)
}
fn alloc_phys(&mut self, layout: Layout) -> Option<PhysAddr> {
if layout.size() <= 4096 && layout.align() <= layout.size() {
// Special case, just allocate directly from our free lists. Our
// free lists for allocations <= 4096 bytes directly map to the
// physical memory map, and are naturally aligned
unsafe {
let ptr = core!().free_list(layout).lock().pop();
Some(PhysAddr(ptr as u64 - KERNEL_PHYS_WINDOW_BASE))
}
} else {
// Get access to physical memory
let mut phys_mem = unsafe {
core!().boot_args.free_memory_ref().lock()
};
let phys_mem = phys_mem.as_mut()?;
// Could not satisfy allocation from free list, allocate
// directly from the physical memory pool
let alc = phys_mem.allocate_prefer(layout.size() as u64,
layout.align() as u64,
memory_range())?;
// Update stats
GLOBAL_ALLOCATOR.free_physical
.store(phys_mem.sum().unwrap(), Ordering::Relaxed);
Some(PhysAddr(alc as u64))
}
}
}
/// The global allocator for the bootloader, this just uses physical memory as
/// a backing and does not handle any fancy things like fragmentation. Use this
/// carefully.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator {
num_allocs: AtomicU64::new(0),
num_frees: AtomicU64::new(0),
free_physical: AtomicU64::new(0),
free_list: AtomicU64::new(0),
};
/// Empty structure that we can implement `GlobalAlloc` for such that we can
/// use the `#[global_allocator]`
#[derive(Debug)]
pub struct GlobalAllocator {
/// Number of allocations performed
pub num_allocs: AtomicU64,
/// Number of frees performed
pub num_frees: AtomicU64,
/// Current number of free bytes in the physical memory pool, this only
/// ever decreases since we do not free back to physical memory
pub free_physical: AtomicU64,
/// Number of bytes sitting in free lists
pub free_list: AtomicU64,
}
| let total_phys = core!().boot_args
.total_physical_memory.load(Ordering::Relaxed);
// Get physical memory in use
let phys_inuse =
total_phys - GLOBAL_ALLOCATOR.free_physical.load(Ordering::Relaxed);
print!("Allocs {:8} | Frees {:8} | Physical {:10.2} MiB / {:10.2} MiB | \
Free List {:10.2} MiB\n",
GLOBAL_ALLOCATOR.num_allocs.load(Ordering::Relaxed),
GLOBAL_ALLOCATOR.num_frees.load(Ordering::Relaxed),
phys_inuse as f64 / 1024. / 1024.,
total_phys as f64 / 1024. / 1024.,
GLOBAL_ALLOCATOR.free_list
.load(Ordering::Relaxed) as f64 / 1024. / 1024.);
}
unsafe impl GlobalAlloc for GlobalAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Allocate memory from our free lists
let ptr = core!().free_list(layout).lock().pop();
// Update stats
self.num_allocs.fetch_add(1, Ordering::Relaxed);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// Free the memory
core!().free_list(layout).lock().push(ptr);
// Update stats
self.num_frees.fetch_add(1, Ordering::Relaxed);
}
}
/// Allocation containing a physically contiguous allocation
pub struct PhysContig<T> {
/// Virtual address of the allocation
vaddr: VirtAddr,
/// Physical address of the allocation
paddr: PhysAddr,
/// Mark that this "holds" a `T`
_phantom: PhantomData<T>,
}
impl<T> PhysContig<T> {
/// Allocate physically contiguous memory large enough to hold `val` and
/// move `val` into it
pub fn new(val: T) -> PhysContig<T> {
assert!(size_of::<T>() > 0, "Cannot use ZST for PhysContig");
assert!(size_of::<T>() <= 4096, "Size too large for PhysContig");
unsafe {
// Allocate a 4 KiB page
let alloc = GLOBAL_ALLOCATOR.alloc(
Layout::from_size_align(4096, 4096).unwrap());
// Compute the physical address of this allocation
let paddr = PhysAddr(alloc as u64 - KERNEL_PHYS_WINDOW_BASE);
// Initialize the memory to `val`
core::ptr::write(alloc as *mut T, val);
// Create the `PhysContig` structure
PhysContig {
vaddr: VirtAddr(alloc as u64),
paddr: paddr,
_phantom: PhantomData,
}
}
}
/// Get the physical address of the allocation
pub fn phys_addr(&self) -> PhysAddr {
self.paddr
}
}
impl<T> Drop for PhysContig<T> {
fn drop(&mut self) {
unsafe {
GLOBAL_ALLOCATOR.dealloc(self.vaddr.0 as *mut u8,
Layout::from_size_align(4096, 4096).unwrap());
}
}
}
impl<T> Deref for PhysContig<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe {
&*(self.vaddr.0 as *const T)
}
}
}
impl<T> DerefMut for PhysContig<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe {
&mut *(self.vaddr.0 as *mut T)
}
}
}
/// Out-of-memory handler, we just panic
#[alloc_error_handler]
fn alloc_error(_layout: Layout) -> ! {
panic!("Out of memory");
} | /// Print the allocation statistics to the screen
pub fn print_alloc_stats() {
// Get total amount of physical memory | random_line_split |
mm.rs | //! The virtual and physical memory manager for the kernel
use core::marker::PhantomData;
use core::mem::size_of;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::alloc::{Layout, GlobalAlloc};
use core::sync::atomic::{AtomicU64, AtomicPtr, Ordering};
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use crate::acpi::MAX_CORES;
use rangeset::RangeSet;
use boot_args::{KERNEL_PHYS_WINDOW_BASE, KERNEL_PHYS_WINDOW_SIZE};
use boot_args::KERNEL_VMEM_BASE;
use page_table::{PhysMem, PhysAddr, PageType, VirtAddr};
/// Table which is indexed by an APIC identifier to map to a physical range
/// which is local to it its NUMA node
static APIC_TO_MEMORY_RANGE: AtomicPtr<[Option<RangeSet>; MAX_CORES]> =
AtomicPtr::new(core::ptr::null_mut());
/// Get the preferred memory range for the currently running APIC. Returns
/// `None` if we have no valid APIC ID yet, or we do not have NUMA knowledge
/// of the current APIC ID
pub fn memory_range<'a>() -> Option<&'a RangeSet> {
// Check to see if the `APIC_TO_MEMORY_RANGE` has been initialized
let atmr = APIC_TO_MEMORY_RANGE.load(Ordering::SeqCst);
if atmr.is_null() {
return None;
}
// Cast the memory range structure to something we can access
let atmr = unsafe { &*atmr };
// Based on our current APIC ID look up the memory range
core!().apic_id().and_then(|x| atmr[x as usize].as_ref())
}
/// Establish the `APIC_TO_MEMORY_RANGE` global with the APIC IDs to their
/// corresponding NUMA-local memory regions
pub unsafe fn register_numa_nodes(apic_to_domain: BTreeMap<u32, u32>,
domain_to_mem: BTreeMap<u32, RangeSet>) {
// Create a heap-based database
let mut apic_mappings: Box<MaybeUninit<[Option<RangeSet>; MAX_CORES]>> =
Box::new_uninit();
// Initialize the heap based memory
for core in 0..MAX_CORES {
let foo = apic_mappings.as_mut_ptr() as *mut Option<RangeSet>;
core::ptr::write(foo.offset(core as isize), None);
}
// APIC mappings are now initialized
let mut apic_mappings = apic_mappings.assume_init();
// Go through each APIC to domain mapping
for (&apic, domain) in apic_to_domain.iter() {
apic_mappings[apic as usize] = domain_to_mem.get(domain)
.and_then(|&rs| {
Some(rs)
});
}
// Store the apic mapping database into the global!
APIC_TO_MEMORY_RANGE.store(Box::into_raw(apic_mappings), Ordering::SeqCst);
}
/// Find a free region of virtual memory that can hold `size` bytes and return
/// the virtual address
///
/// This is only valid for virtual requests for 4 KiB mappings
pub fn alloc_virt_addr_4k(size: u64) -> VirtAddr {
/// Base address for virtual allocations
static NEXT_FREE_VADDR: AtomicU64 = AtomicU64::new(KERNEL_VMEM_BASE);
/// Gap between virtual allocations
const GUARD_PAGE_SIZE: u64 = 32 * 1024;
assert!(size > 0 && (size & 0xfff) == 0,
"Invalid size for virtual region allocation");
// Compute the amount of virtual memory to reserve, including the guard
// size.
let reserve_size = GUARD_PAGE_SIZE.checked_add(size as u64)
.expect("Integer overflow on virtual region size");
// Get a new virtual region that is free
let ret = VirtAddr(
NEXT_FREE_VADDR.fetch_add(reserve_size, Ordering::SeqCst)
);
// If we cannot add the reserve size from the return value, then the
// virtual memory wrapped the 64-bit boundary
ret.0.checked_add(reserve_size)
.expect("Integer overflow on virtual address range");
ret
}
/// Gets access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys<'a>(paddr: PhysAddr, size: u64) -> &'a [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
/// Metadata on a freed allocation
#[repr(C)]
struct FreeListNode {
/// Virtual address of the next `FreeListNode`
next: usize,
/// Number of free slots in `free_mem`
free_slots: usize,
/// Virtual addresses of free allocations
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes in a semi-linked list
/// table thingy.
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
/// Get a address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else |
}
}
/// A wrapper on a range set to allow implementing the `PhysMem` trait
pub struct PhysicalMemory;
impl PhysMem for PhysicalMemory {
unsafe fn translate(&mut self, paddr: PhysAddr, size: usize)
-> Option<*const u8> {
self.translate_mut(paddr, size).map(|x| x as *const u8)
}
unsafe fn translate_mut(&mut self, paddr: PhysAddr, size: usize)
-> Option<*mut u8> {
// Compute the ending physical address
let end = (size as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
})?;
// Make sure this physical address fits inside our window
if end >= KERNEL_PHYS_WINDOW_SIZE {
return None;
}
// Convert the physical address into linear mapping view address
Some((paddr.0 + KERNEL_PHYS_WINDOW_BASE) as *mut u8)
}
fn alloc_phys(&mut self, layout: Layout) -> Option<PhysAddr> {
if layout.size() <= 4096 && layout.align() <= layout.size() {
// Special case, just allocate directly from our free lists. Our
// free lists for allocations <= 4096 bytes directly map to the
// physical memory map, and are naturally aligned
unsafe {
let ptr = core!().free_list(layout).lock().pop();
Some(PhysAddr(ptr as u64 - KERNEL_PHYS_WINDOW_BASE))
}
} else {
// Get access to physical memory
let mut phys_mem = unsafe {
core!().boot_args.free_memory_ref().lock()
};
let phys_mem = phys_mem.as_mut()?;
// Could not satisfy allocation from free list, allocate
// directly from the physical memory pool
let alc = phys_mem.allocate_prefer(layout.size() as u64,
layout.align() as u64,
memory_range())?;
// Update stats
GLOBAL_ALLOCATOR.free_physical
.store(phys_mem.sum().unwrap(), Ordering::Relaxed);
Some(PhysAddr(alc as u64))
}
}
}
/// The global allocator for the bootloader, this just uses physical memory as
/// a backing and does not handle any fancy things like fragmentation. Use this
/// carefully.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator {
num_allocs: AtomicU64::new(0),
num_frees: AtomicU64::new(0),
free_physical: AtomicU64::new(0),
free_list: AtomicU64::new(0),
};
/// Empty structure that we can implement `GlobalAlloc` for such that we can
/// use the `#[global_allocator]`
#[derive(Debug)]
pub struct GlobalAllocator {
/// Number of allocations performed
pub num_allocs: AtomicU64,
/// Number of frees performed
pub num_frees: AtomicU64,
/// Current number of free bytes in the physical memory pool, this only
/// ever decreases since we do not free back to physical memory
pub free_physical: AtomicU64,
/// Number of bytes sitting in free lists
pub free_list: AtomicU64,
}
/// Print the allocation statistics to the screen
pub fn print_alloc_stats() {
// Get total amount of physical memory
let total_phys = core!().boot_args
.total_physical_memory.load(Ordering::Relaxed);
// Get physical memory in use
let phys_inuse =
total_phys - GLOBAL_ALLOCATOR.free_physical.load(Ordering::Relaxed);
print!("Allocs {:8} | Frees {:8} | Physical {:10.2} MiB / {:10.2} MiB | \
Free List {:10.2} MiB\n",
GLOBAL_ALLOCATOR.num_allocs.load(Ordering::Relaxed),
GLOBAL_ALLOCATOR.num_frees.load(Ordering::Relaxed),
phys_inuse as f64 / 1024. / 1024.,
total_phys as f64 / 1024. / 1024.,
GLOBAL_ALLOCATOR.free_list
.load(Ordering::Relaxed) as f64 / 1024. / 1024.);
}
unsafe impl GlobalAlloc for GlobalAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Allocate memory from our free lists
let ptr = core!().free_list(layout).lock().pop();
// Update stats
self.num_allocs.fetch_add(1, Ordering::Relaxed);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// Free the memory
core!().free_list(layout).lock().push(ptr);
// Update stats
self.num_frees.fetch_add(1, Ordering::Relaxed);
}
}
/// Allocation containing a physically contiguous allocation
pub struct PhysContig<T> {
/// Virtual address of the allocation
vaddr: VirtAddr,
/// Physical address of the allocation
paddr: PhysAddr,
/// Mark that this "holds" a `T`
_phantom: PhantomData<T>,
}
impl<T> PhysContig<T> {
/// Allocate physically contiguous memory large enough to hold `val` and
/// move `val` into it
pub fn new(val: T) -> PhysContig<T> {
assert!(size_of::<T>() > 0, "Cannot use ZST for PhysContig");
assert!(size_of::<T>() <= 4096, "Size too large for PhysContig");
unsafe {
// Allocate a 4 KiB page
let alloc = GLOBAL_ALLOCATOR.alloc(
Layout::from_size_align(4096, 4096).unwrap());
// Compute the physical address of this allocation
let paddr = PhysAddr(alloc as u64 - KERNEL_PHYS_WINDOW_BASE);
// Initialize the memory to `val`
core::ptr::write(alloc as *mut T, val);
// Create the `PhysContig` structure
PhysContig {
vaddr: VirtAddr(alloc as u64),
paddr: paddr,
_phantom: PhantomData,
}
}
}
/// Get the physical address of the allocation
pub fn phys_addr(&self) -> PhysAddr {
self.paddr
}
}
impl<T> Drop for PhysContig<T> {
fn drop(&mut self) {
unsafe {
GLOBAL_ALLOCATOR.dealloc(self.vaddr.0 as *mut u8,
Layout::from_size_align(4096, 4096).unwrap());
}
}
}
impl<T> Deref for PhysContig<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe {
&*(self.vaddr.0 as *const T)
}
}
}
impl<T> DerefMut for PhysContig<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe {
&mut *(self.vaddr.0 as *mut T)
}
}
}
/// Out-of-memory handler, we just panic
#[alloc_error_handler]
fn alloc_error(_layout: Layout) -> ! {
panic!("Out of memory");
}
| {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize;
} else {
// There's room in the current stack, just throw us in there
let fl = &mut *(self.head as *mut FreeListNode);
// Decrement the number of free slots
fl.free_slots -= 1;
// Store our newly freed virtual address into this slot
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize) =
vaddr;
}
} | conditional_block |
main.rs | use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
use log::{debug, info, error};
use winit::window::Window;
#[macro_use]
extern crate bitflags;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
// Changed
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
const SECOND_INDICES: &[u16] = &[
0, 1, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
bitflags! {
struct Levers: u32 {
const LEVER1 = 0b00000001;
const LEVER2 = 0b00000010;
}
}
struct | {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
mouse_pos: cgmath::Point2<f64>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
second_index_buffer: wgpu::Buffer,
second_num_indices: u32,
levers: Levers,
diffuse_bind_group: wgpu::BindGroup,
}
impl State {
async fn new(window: &Window) -> Result<Self, Box<dyn std::error::Error>> {
let size = window.inner_size();
// instance holds the handle to the GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU (they are all ORed)
// TODO: Try BackendBit::VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Framerate will be capped with `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?;
let diffuse_rgba = diffuse_image.as_rgba8().expect("Can't transform image info");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let num_indices = INDICES.len() as u32;
let second_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Second Index Buffer"),
contents: bytemuck::cast_slice(SECOND_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let second_num_indices = SECOND_INDICES.len() as u32;
let levers = Levers::empty();
Ok(
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
mouse_pos: cgmath::Point2 {x: 0.0, y: 0.0},
render_pipeline,
vertex_buffer,
index_buffer,
second_index_buffer,
num_indices,
second_num_indices,
levers,
diffuse_bind_group,
}
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::CursorMoved {position, ..} => {
self.mouse_pos.x = position.x;
self.mouse_pos.y = position.y;
// debug!("Mouse moved to point: {:?}", self.mouse_pos);
true
},
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
} => match state {
ElementState::Pressed => {
self.levers = self.levers | Levers::LEVER1;
true
},
ElementState::Released => {
self.levers = self.levers & !Levers::LEVER1;
true
},
},
_ => false
},
_ => false
}
}
fn update(&mut self) {
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
let frame = self.swap_chain
.get_current_frame()?
.output;
let mut encoder = self.device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
}
);
{
let mut render_pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[
// This is what [[location(0)]] in the fragment shader targets
wgpu::RenderPassColorAttachment {
view: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
}
],
depth_stencil_attachment: None,
}
);
let data = {
if self.levers.contains(Levers::LEVER1) {
(&self.second_index_buffer, self.second_num_indices)
} else {
(&self.index_buffer, self.num_indices)
}
};
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(data.0.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(
0..data.1,
0,
0..1
);
}
self.queue.submit(std::iter::once(encoder.finish()));
Ok(())
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
// env_logger::Builder::new()
// .filter_module(
// "learn_wgpu_book", log::LevelFilter::Debug
// )
// .init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)?;
let mut state = futures::executor::block_on(State::new(&window))?;
event_loop.run(move |event, _, control_flow|
match event {
Event::WindowEvent {
ref event,
window_id
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = handle_exit(ExitReason::CloseRequest),
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = handle_exit(ExitReason::Escape),
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size)
}
WindowEvent::ScaleFactorChanged {new_inner_size, ..} => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
state.update();
match state.render() {
Ok(_) => {},
// Recreate the swap chain if lost
Err(wgpu::SwapChainError::Lost) => state.resize(state.size),
// If the system is OOM, we should quit.
Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = handle_exit(ExitReason::OOM),
// The other swap chain errors will be fixed in the next cycle.
Err(e) => error!("{:?}", e),
}
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
);
}
enum ExitReason {
Escape,
CloseRequest,
OOM,
}
fn handle_exit(why: ExitReason) -> ControlFlow {
let reason = match why {
ExitReason::CloseRequest => "Close request received.",
ExitReason::Escape => "Escape received",
ExitReason::OOM => "System is OOM",
};
debug!("{}", reason);
info!("Bye");
ControlFlow::Exit
}
| State | identifier_name |
main.rs | use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
use log::{debug, info, error};
use winit::window::Window;
#[macro_use]
extern crate bitflags;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
// Changed
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
const SECOND_INDICES: &[u16] = &[
0, 1, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
bitflags! {
struct Levers: u32 {
const LEVER1 = 0b00000001;
const LEVER2 = 0b00000010;
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
mouse_pos: cgmath::Point2<f64>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
second_index_buffer: wgpu::Buffer,
second_num_indices: u32,
levers: Levers,
diffuse_bind_group: wgpu::BindGroup,
}
impl State {
async fn new(window: &Window) -> Result<Self, Box<dyn std::error::Error>> {
let size = window.inner_size();
// instance holds the handle to the GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU (they are all ORed)
// TODO: Try BackendBit::VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Framerate will be capped with `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?; | let diffuse_rgba = diffuse_image.as_rgba8().expect("Can't transform image info");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let num_indices = INDICES.len() as u32;
let second_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Second Index Buffer"),
contents: bytemuck::cast_slice(SECOND_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let second_num_indices = SECOND_INDICES.len() as u32;
let levers = Levers::empty();
Ok(
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
mouse_pos: cgmath::Point2 {x: 0.0, y: 0.0},
render_pipeline,
vertex_buffer,
index_buffer,
second_index_buffer,
num_indices,
second_num_indices,
levers,
diffuse_bind_group,
}
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::CursorMoved {position, ..} => {
self.mouse_pos.x = position.x;
self.mouse_pos.y = position.y;
// debug!("Mouse moved to point: {:?}", self.mouse_pos);
true
},
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
} => match state {
ElementState::Pressed => {
self.levers = self.levers | Levers::LEVER1;
true
},
ElementState::Released => {
self.levers = self.levers & !Levers::LEVER1;
true
},
},
_ => false
},
_ => false
}
}
fn update(&mut self) {
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
let frame = self.swap_chain
.get_current_frame()?
.output;
let mut encoder = self.device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
}
);
{
let mut render_pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[
// This is what [[location(0)]] in the fragment shader targets
wgpu::RenderPassColorAttachment {
view: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
}
],
depth_stencil_attachment: None,
}
);
let data = {
if self.levers.contains(Levers::LEVER1) {
(&self.second_index_buffer, self.second_num_indices)
} else {
(&self.index_buffer, self.num_indices)
}
};
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(data.0.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(
0..data.1,
0,
0..1
);
}
self.queue.submit(std::iter::once(encoder.finish()));
Ok(())
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
// env_logger::Builder::new()
// .filter_module(
// "learn_wgpu_book", log::LevelFilter::Debug
// )
// .init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)?;
let mut state = futures::executor::block_on(State::new(&window))?;
event_loop.run(move |event, _, control_flow|
match event {
Event::WindowEvent {
ref event,
window_id
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = handle_exit(ExitReason::CloseRequest),
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = handle_exit(ExitReason::Escape),
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size)
}
WindowEvent::ScaleFactorChanged {new_inner_size, ..} => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
state.update();
match state.render() {
Ok(_) => {},
// Recreate the swap chain if lost
Err(wgpu::SwapChainError::Lost) => state.resize(state.size),
// If the system is OOM, we should quit.
Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = handle_exit(ExitReason::OOM),
// The other swap chain errors will be fixed in the next cycle.
Err(e) => error!("{:?}", e),
}
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
);
}
enum ExitReason {
Escape,
CloseRequest,
OOM,
}
fn handle_exit(why: ExitReason) -> ControlFlow {
let reason = match why {
ExitReason::CloseRequest => "Close request received.",
ExitReason::Escape => "Escape received",
ExitReason::OOM => "System is OOM",
};
debug!("{}", reason);
info!("Bye");
ControlFlow::Exit
} | random_line_split | |
main.rs | use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
use log::{debug, info, error};
use winit::window::Window;
#[macro_use]
extern crate bitflags;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
// Changed
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
const SECOND_INDICES: &[u16] = &[
0, 1, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
bitflags! {
struct Levers: u32 {
const LEVER1 = 0b00000001;
const LEVER2 = 0b00000010;
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
mouse_pos: cgmath::Point2<f64>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
second_index_buffer: wgpu::Buffer,
second_num_indices: u32,
levers: Levers,
diffuse_bind_group: wgpu::BindGroup,
}
impl State {
async fn new(window: &Window) -> Result<Self, Box<dyn std::error::Error>> {
let size = window.inner_size();
// instance holds the handle to the GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU (they are all ORed)
// TODO: Try BackendBit::VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Framerate will be capped with `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?;
let diffuse_rgba = diffuse_image.as_rgba8().expect("Can't transform image info");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let num_indices = INDICES.len() as u32;
let second_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Second Index Buffer"),
contents: bytemuck::cast_slice(SECOND_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let second_num_indices = SECOND_INDICES.len() as u32;
let levers = Levers::empty();
Ok(
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
mouse_pos: cgmath::Point2 {x: 0.0, y: 0.0},
render_pipeline,
vertex_buffer,
index_buffer,
second_index_buffer,
num_indices,
second_num_indices,
levers,
diffuse_bind_group,
}
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::CursorMoved {position, ..} => {
self.mouse_pos.x = position.x;
self.mouse_pos.y = position.y;
// debug!("Mouse moved to point: {:?}", self.mouse_pos);
true
},
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
} => match state {
ElementState::Pressed => {
self.levers = self.levers | Levers::LEVER1;
true
},
ElementState::Released => {
self.levers = self.levers & !Levers::LEVER1;
true
},
},
_ => false
},
_ => false
}
}
fn update(&mut self) {
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
let frame = self.swap_chain
.get_current_frame()?
.output;
let mut encoder = self.device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
}
);
{
let mut render_pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[
// This is what [[location(0)]] in the fragment shader targets
wgpu::RenderPassColorAttachment {
view: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
}
],
depth_stencil_attachment: None,
}
);
let data = {
if self.levers.contains(Levers::LEVER1) {
(&self.second_index_buffer, self.second_num_indices)
} else |
};
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(data.0.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(
0..data.1,
0,
0..1
);
}
self.queue.submit(std::iter::once(encoder.finish()));
Ok(())
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
// env_logger::Builder::new()
// .filter_module(
// "learn_wgpu_book", log::LevelFilter::Debug
// )
// .init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)?;
let mut state = futures::executor::block_on(State::new(&window))?;
event_loop.run(move |event, _, control_flow|
match event {
Event::WindowEvent {
ref event,
window_id
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = handle_exit(ExitReason::CloseRequest),
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = handle_exit(ExitReason::Escape),
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size)
}
WindowEvent::ScaleFactorChanged {new_inner_size, ..} => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
state.update();
match state.render() {
Ok(_) => {},
// Recreate the swap chain if lost
Err(wgpu::SwapChainError::Lost) => state.resize(state.size),
// If the system is OOM, we should quit.
Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = handle_exit(ExitReason::OOM),
// The other swap chain errors will be fixed in the next cycle.
Err(e) => error!("{:?}", e),
}
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
);
}
enum ExitReason {
Escape,
CloseRequest,
OOM,
}
fn handle_exit(why: ExitReason) -> ControlFlow {
let reason = match why {
ExitReason::CloseRequest => "Close request received.",
ExitReason::Escape => "Escape received",
ExitReason::OOM => "System is OOM",
};
debug!("{}", reason);
info!("Bye");
ControlFlow::Exit
}
| {
(&self.index_buffer, self.num_indices)
} | conditional_block |
main.rs | use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
use log::{debug, info, error};
use winit::window::Window;
#[macro_use]
extern crate bitflags;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
// Changed
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
const SECOND_INDICES: &[u16] = &[
0, 1, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
bitflags! {
struct Levers: u32 {
const LEVER1 = 0b00000001;
const LEVER2 = 0b00000010;
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
mouse_pos: cgmath::Point2<f64>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
second_index_buffer: wgpu::Buffer,
second_num_indices: u32,
levers: Levers,
diffuse_bind_group: wgpu::BindGroup,
}
impl State {
async fn new(window: &Window) -> Result<Self, Box<dyn std::error::Error>> {
let size = window.inner_size();
// instance holds the handle to the GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU (they are all ORed)
// TODO: Try BackendBit::VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Framerate will be capped with `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?;
let diffuse_rgba = diffuse_image.as_rgba8().expect("Can't transform image info");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let num_indices = INDICES.len() as u32;
let second_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Second Index Buffer"),
contents: bytemuck::cast_slice(SECOND_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let second_num_indices = SECOND_INDICES.len() as u32;
let levers = Levers::empty();
Ok(
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
mouse_pos: cgmath::Point2 {x: 0.0, y: 0.0},
render_pipeline,
vertex_buffer,
index_buffer,
second_index_buffer,
num_indices,
second_num_indices,
levers,
diffuse_bind_group,
}
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) |
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::CursorMoved {position, ..} => {
self.mouse_pos.x = position.x;
self.mouse_pos.y = position.y;
// debug!("Mouse moved to point: {:?}", self.mouse_pos);
true
},
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
} => match state {
ElementState::Pressed => {
self.levers = self.levers | Levers::LEVER1;
true
},
ElementState::Released => {
self.levers = self.levers & !Levers::LEVER1;
true
},
},
_ => false
},
_ => false
}
}
fn update(&mut self) {
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
let frame = self.swap_chain
.get_current_frame()?
.output;
let mut encoder = self.device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
}
);
{
let mut render_pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[
// This is what [[location(0)]] in the fragment shader targets
wgpu::RenderPassColorAttachment {
view: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
}
],
depth_stencil_attachment: None,
}
);
let data = {
if self.levers.contains(Levers::LEVER1) {
(&self.second_index_buffer, self.second_num_indices)
} else {
(&self.index_buffer, self.num_indices)
}
};
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(data.0.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(
0..data.1,
0,
0..1
);
}
self.queue.submit(std::iter::once(encoder.finish()));
Ok(())
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
// env_logger::Builder::new()
// .filter_module(
// "learn_wgpu_book", log::LevelFilter::Debug
// )
// .init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)?;
let mut state = futures::executor::block_on(State::new(&window))?;
event_loop.run(move |event, _, control_flow|
match event {
Event::WindowEvent {
ref event,
window_id
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = handle_exit(ExitReason::CloseRequest),
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = handle_exit(ExitReason::Escape),
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size)
}
WindowEvent::ScaleFactorChanged {new_inner_size, ..} => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
state.update();
match state.render() {
Ok(_) => {},
// Recreate the swap chain if lost
Err(wgpu::SwapChainError::Lost) => state.resize(state.size),
// If the system is OOM, we should quit.
Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = handle_exit(ExitReason::OOM),
// The other swap chain errors will be fixed in the next cycle.
Err(e) => error!("{:?}", e),
}
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
);
}
enum ExitReason {
Escape,
CloseRequest,
OOM,
}
fn handle_exit(why: ExitReason) -> ControlFlow {
let reason = match why {
ExitReason::CloseRequest => "Close request received.",
ExitReason::Escape => "Escape received",
ExitReason::OOM => "System is OOM",
};
debug!("{}", reason);
info!("Bye");
ControlFlow::Exit
}
| {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
} | identifier_body |
PEATSAplugin.py | #!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
try:
from Plugins import Plugin
except:
from PEATDB.Plugins import Plugin
import os, types, copy, pickle
from Tkinter import *
import Pmw
import PEATSA.WebApp.Data
import PEATSA.WebApp.UtilityFunctions
import PEATSA.Core as Core
from PEATDB.Dialogs import MultipleValDialog
from PEATDB.Actions import DBActions
from PEATDB.TableModels import TableModel
from PEATDB.Tables import TableCanvas
import tkMessageBox, tkSimpleDialog, tkFileDialog
class PEATSAPlugin(Plugin):
"""Template GUI plugin for PEAT App"""
capabilities = ['gui']
requires = ['PEATSA']
menuentry = 'PEATSA Plugin'
gui_methods = {'fetchJob':'Fetch Job from Server',
'editConfigFile' : 'Configure Server',
'help':'Help',
'quit':'Close Window'}
buttonorder = ['createJobDialog','fetchJob','editConfigFile','help','quit']
about = 'This plugin allows you to call PEATSA'
calctypes = ['stability','binding','pka']
def main(self, parent=None, DB=None):
if parent == None:
if DB != None:
self.DB = DB
self.setupConnection()
else:
return
else:
self.parent = parent
self.DB = parent.DB
if self.DB == None:
self.displayNoDBWarning()
return
self._doFrame()
self.setupConnection()
print 'Updating jobs table..'
self.updateJobs()
return self
def setupConnection(self):
"""Set up connection"""
homepath = os.path.expanduser("~")
self.confpath = os.path.join(homepath, 'peatsa.conf')
if os.path.exists(self.confpath):
configuration = Core.Environment.Configuration(filename=self.confpath)
else:
configuration = Core.Environment.Configuration(searchDefaultLocations=False)
configuration.add_section('DATABASE')
configuration.set('DATABASE', 'database', 'DBSAInterface')
configuration.set('DATABASE', 'host', 'enzyme.ucd.ie')
configuration.set('DATABASE', 'user', 'peatdb')
configuration.set('DATABASE', 'password', '123')
configuration.writeToFile(self.confpath)
if self.parent != None:
tkMessageBox.showwarning("Connection Error",
'No PEATSA server configured, press configure server'
' to set a server, username and password.')
self.connect(configuration)
return
def _doFrame(self):
self.mainwin = self.parent.createChildFrame(width=460,title='PEATSA Plugin')
#self.mainwin = self.parent.create
methods = self._getmethods()
methods = [m for m in methods if m[0] in self.gui_methods.keys()]
l=Label(self.mainwin, text='PEATSA Interface')
l.pack(side=TOP,fill=BOTH)
self.tf=LabelFrame(self.mainwin,text='Project Calculations')
self.tf.pack(side=TOP,fill=BOTH,expand=1)
self.manageJobsButtons(self.mainwin)
self._createButtons(methods)
self.log = self.createLogWin(self.mainwin)
self.log.pack(side=TOP,fill=BOTH,expand=1)
self.stdout2Log()
self.mainwin.bind("<Destroy>", self.quit)
#self.parent.sidepane.bind("<Destroy>", self.test1)
return
def _createButtons(self, methods):
"""Dynamically create buttons for supplied methods, which is a tuple
of (method name, label)"""
mbutton=Menubutton(self.mainwin, text='Options', width=12,
borderwidth=2, relief=RIDGE,
activeforeground='red')
menu=Menu(mbutton,tearoff=0)
mbutton['menu']=menu
mbutton.pack(side=BOTTOM,fill=BOTH)
for m in methods:
menu.add_radiobutton(label=self.gui_methods[m[0]],
indicatoron=0,
command=m[1])
b=Button(self.mainwin,text='Create Calculation',command=self.createJobDialog)
b.pack(side=BOTTOM,fill=BOTH)
return
def updateJobsTable(self):
"""Show table for current jobs list"""
self.checkJobsDict()
jobdict = self.DB.meta.peatsa_jobs
M = TableModel()
#open job log from file
f=open('jobstates.log','r')
jl = pickle.load(f)
for j in jobdict:
jobid = jobdict[j]
try:
M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])
except:
M.addRecord(j,state='Not in DB')
self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)
self.jobstable.createTableFrame()
self.log.yview('moveto', 1)
f.close()
return
def manageJobsButtons(self, parent):
fr1 = Frame(parent)
Button(fr1,text='View Results',command=self.showAllResults,bg='#ccFFFF').pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
Button(fr1,text='Merge Results',command=self.mergeCurrent).pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
fr = Frame(parent)
c='#ADD8E6'
Button(fr,text='Show Details',command=self.viewDetails,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Manage Results',command=self.manageResults,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Remove',command=self.removeJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Resubmit',command=self.resubmitJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
fr.pack(fill=BOTH)
return
def createLogWin(self, parent):
log = Pmw.ScrolledText(parent,
borderframe=1,
labelpos = 'n',
label_text='Log',
usehullsize = 1,
hull_width = 800,
hull_height = 200,
text_wrap='word')
return log
def stdout2Log(self):
"""Redirect stdout to app control"""
sys.stdout = self
sys.stderr = self
return
def log2Stdout(self):
"""return to stdout"""
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
return
def write(self, txt):
"""Handle stdout if required"""
self.log.appendtext(txt)
self.log.update_idletasks()
return
def flush(self):
return
def connect(self, configuration):
"""Create connection"""
self.connection = PEATSA.WebApp.UtilityFunctions.ConnectionFromConfiguration(configuration)
self.jobManager = PEATSA.WebApp.Data.JobManager(self.connection)
self.jobManager.setJobStateLogging('jobstates.log',interval=60)
print '\nConnection to server made sucessfully.\n'
return
def createMutationList(self, filename=None):
self.mutationList = Core.Data.MutationListFile(create=False)
return
def fetchJob(self):
"""Get job from it's db ID and add to list"""
mpDlg = MultipleValDialog(title='Get Job',
initialvalues=('','my job1'),
labels=('ID','Your label',),
types=('string','string'),
parent=self.mainwin)
if mpDlg.result == True:
jobid = mpDlg.results[0]
name = mpDlg.results[1]
else:
return
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
if job != None:
print 'adding job id %s to list' %job.identification
self.storeJob(name, job)
self.updateJobs()
return
def writetempPDB(self,name=None,pdbfile='refprot.pdb'):
if name==None:
name = self.DB.meta.refprotein
pdblines = self.DB[name].Structure
#pdbfile = 'refprot.pdb'
fd=open(pdbfile,'w')
for line in pdblines:
fd.write(line)
fd.close()
return pdbfile
def getrefPDBName(self):
name = self.DB.meta.refprotein
if self.DB[name].has_key('pdbname'):
name = self.DB[name]['pdbname']
return name.split('.')[0]
else:
return ''
def createJobDialog(self):
"""Get details from user using dialog
required: structure, mutations, type of calc and a tag (optional)"""
def validatename(text):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return 1
if text in self.DB.meta.peatsa_jobs:
return -1
else:
return 1
def close():
jobdlg.destroy()
def loadmuts():
filename=tkFileDialog.askopenfilename(initialdir=os.getcwd(),
filetypes=[("All files","*")])
if filename:
mutlist.importfile(filename)
return
def loadmutsfromDB():
for p in self.DB.getRecs():
mut = self.DB.get(p).Mutations
if mut == None or mut=='':
continue
if type(mut) is types.StringType:
mutlist.appendtext(mut+'\n')
else:
mutstring = mut.getMutationString()
if mutstring != None:
mutlist.appendtext(mutstring+'\n')
return
def getstruct():
filename=tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("pdb","*.pdb"),("All files","*.*")])
pdbentry.setvalue(filename)
return
def getligand():
self.ligandfile = tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("mol2","*.mol2"),("All files","*.*")])
def submit():
#if calcmenu.getcurselection() == 'both':
# calcs = ['stability','binding']
if calcmenu.getcurselection() == 'pka':
calcs = ['scan']
else:
calcs = [calcmenu.getcurselection()]
mutationlist = mutlist.getvalue().split('\n')
mutationlist.remove('')
pdbfile=None; pdb = None
quality = mutqualentry.getvalue()
if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:
tkMessageBox.showinfo('No ref protein',
'Set a reference (wt) protein first')
return
#if self.useref.get() == 1:
#we use ref pdb by default now
pdbfile = self.writetempPDB()
pdbname = self.getrefPDBName()
if len(mutationlist) == 0 or mutationlist==[u'']:
print 'mutation list is empty'
return
if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:
print 'job name already used'
return
name=nameentry.getvalue()
expcol = expcolmenu.getcurselection()
self.submitJob(name=name, pdbname=pdbname,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=mutationlist,
calcs=calcs, mutationquality=quality,
meta={'expcol':expcol,'pdbname':pdbname})
close()
jobdlg = Toplevel()
jobdlg.geometry('+220+220')
jobdlg.title('Create Calculation')
balloon = Pmw.Balloon(jobdlg)
nameentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Name:',
validate = validatename,
value = 'mycalc')
nameentry.pack(fill=BOTH,expand=1)
balloon.bind(nameentry, 'Calculation name can be anything, but should be unique')
expcols = ['']+self.DB.getSimpleFields()
expcolmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Exp. col:',
items = expcols,
initialitem = '',
menubutton_width = 8)
expcolmenu.pack(fill=BOTH,expand=1)
balloon.bind(expcolmenu, 'Field with experimental data to compare, optional')
calcmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Calculation Type:',
items = self.calctypes,
initialitem = 'stability',
menubutton_width = 8)
calcmenu.pack(fill=X,expand=1)
fr=Frame(jobdlg)
fr.pack(fill=X,expand=1)
mutqualentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Quality:',
validate = validatename,
value = '2.0')
mutqualentry.pack(fill=BOTH,expand=1)
Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)
self.ligandfile=None
mutlist = Pmw.ScrolledText(jobdlg,
labelpos = 'n',
label_text='Mutations:',
usehullsize = 1,
hull_width = 200,
hull_height = 250,
text_wrap='word')
mutlist.pack(fill=BOTH,expand=1)
Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)
Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2)
jobdlg.grab_set()
jobdlg.transient(self.parent)
self.parent.wait_window(jobdlg)
return
def submitJob(self, name='mycalc', pdbname=None, pdb=None, pdbfile=None, ligandfile=None,
mutations=[], calcs=['stability'], mutationquality='2.0', meta={}):
"""Submit job to server"""
if 'scan' in calcs and pdbname==None:
print 'You must provide pdb code for pKa calcs'
return
if pdb==None and pdbfile==None:
return
job = self.jobManager.createJob(pdbId=pdbname, calculations=calcs,
dataTable='Data', metadata=meta,
optionArgs={'--mutationQuality':mutationquality})
if pdb != None:
job.setStructure(pdb)
else:
job.setStructureFromFile(pdbfile)
if 'binding' in calcs:
job.setLigandFromFile(ligandfile)
self.mutationList = Core.Data.MutationListFile(filename='tempmutlist', create=True)
sets=[]
for code in mutations:
if code == '': continue
try:
sets.append(Core.Data.MutationSet(code))
except:
print 'mutation code %s incorrect' %code
for s in sets:
self.mutationList.addMutant(s, autoUpdate=False, ignoreDuplicates=True)
self.mutationList.removeDuplicates(autoUpdate=False)
job.setMutationListFile(self.mutationList)
job.setState('Ready')
self.jobManager.logJobStates('jobstates.log')
#add job to peat database
self.storeJob(name, job)
if self.parent != None:
username = self.parent.username
self.updateJobs()
else:
username = None
self.DB.commit(note='peatsa job',user=username)
print 'job submitted successfully'
return
def resubmitJob(self):
"""Resend a job based on new mutations in DB that are not in job already"""
job, name = self.getJob()
if job == None:
return
DB=self.DB
self.matrices = job.data.allMatrices()
for m in matrices:
matrix=matrices[m]
if matrix==None: return
muts = matrix.mutationCodes()
dbmuts = [DB.get(p).Mutations for p in DB.getRecs()]
newmuts = list(set(dbmuts) - set(muts))
print 'the following mutations in the project are not in the job: %s' %newmuts
'''self.submitJob(name=name,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=newmuts,
calcs=calcs, meta={'expcol':expcol}) '''
self.log.yview('moveto', 1)
return
def getJob(self, name=None):
"""Get job from name"""
if name == None:
name = self.jobstable.get_selectedRecordNames()[0]
if name == None:
return None, name
jobid = self.DB.meta.peatsa_jobs[name]
try:
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
except:
#print 'job not in database'
return None,name
return job, name
def removeJob(self):
"""Remove a job from the db"""
job, name = self.getJob()
answer = tkMessageBox.askyesno("Warning",'Remove this job?')
if answer == False:
return
try:
self.jobManager.deleteJob(job)
except:
print 'job not in database, removing from peat'
del self.DB.meta.peatsa_jobs[name]
self.DB.meta.__p__changed = 1
self.updateJobs()
return
def viewDetails(self, name=None):
job, name = self.getJob()
if job==None:
return
jobmeta = job.metadata()
print
print job.data
print 'details for job %s' %name
print 'job status:',job.state()
print 'submitted on ',job.date
if jobmeta.has_key('pdbname'):
print 'original pdb file:', jobmeta['pdbname']
print 'mutations:', len(job.mutationListFile().mutantList())
print '(this job has id %s)' %job.identification
if job.error() != None:
print 'The job had an error..'
print job.error()['ErrorDescription']
print job.error()['DetailedDescription']
print
self.log.yview('moveto', 1)
return
def addColoredText(self, st, tag, word, fg='black', bg='white'):
"""add a space to the end of the word"""
word = word + " "
st.insert('end', word)
end_index = st.index('end')
begin_index = "%s-%sc" % (end_index, len(word) + 1)
st.tag_add(tag, begin_index, end_index)
st.tag_config(tag, foreground=fg, background=bg)
return
def checkJobsDict(self):
"""Check jobs data structure exists"""
if not hasattr(self.DB.meta,'peatsa_jobs'):
from ZODB.PersistentMapping import PersistentMapping
self.DB.meta.peatsa_jobs = PersistentMapping()
def storeJob(self, name, job):
"""Store job to DB"""
self.checkJobsDict()
self.DB.meta.peatsa_jobs[name] = job.identification
return
def updateJobs(self):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return
self.updateJobsTable()
self.wait=self.mainwin.after(60000, self.updateJobs)
return
def mergeResults(self, job, colname, tablemodel):
"""Merge given job results to tablemodel"""
if job==None:
return
matrices = job.data.allMatrices()
if not colname:
return
nf={'Total':colname}
for m in matrices:
matrix = matrices[m]
if matrix == None: continue
M = self.mergeMatrix(matrix, tablemodel, fields=['Total'], newfields=nf)
return
def mergeCurrent(self):
"""Auto merge selected job results to main table
called from GUI """
job, name = self.getJob()
if job==None:
return
#get field name to use
colname = tkSimpleDialog.askstring("Column name?",
"Name for column:",
initialvalue=name+'_Predictions',
parent=self.mainwin)
M = self.parent.tablemodel
self.mergeResults(job, colname, M)
self.parent.updateTable()
#also send some meta data to peatsa_meta?
'''from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
cc,rmse = C.getStats(pre,exp)
data.append({'name':p,'rmse':rmse,'cc':cc}) '''
return
def manageResults(self, name=None):
"""Get the results back - we can send the matrix to the main peat
table or put results into a labbook sheet.
Also allow user to merge with an existing table"""
job, name = self.getJob(name)
if job.error() != None:
print 'job had an error, use view details'
elif job.state() == 'Finished':
self.showPEATSAResultsDialog(job, name)
else:
print 'Job is not finished yet.'
return
def editConfigFile(self):
"""Edit config file"""
from PEATDB.textFrame import textFrame
tf = textFrame(parent=self.mainwin,
title='PEATSA Conf file')
tf.load_from_file(self.confpath)
self.parent.wait_window(tf.frame)
#reconnect
configuration = Core.Environment.Configuration(filename=self.confpath)
self.connect(configuration)
return
def showPEATSAResultsDialog(self, job, name):
resdlg = Toplevel()
resdlg.geometry('600x450+300+200')
resdlg.title('PEATSA results '+name)
balloon = Pmw.Balloon(resdlg)
self.currname = name
body = Frame(resdlg)
resdlg.initial_focus = body
body.pack(fill=BOTH,expand=1,padx=5, pady=5)
self.matrices = job.data.allMatrices()
fr=Frame(body)
fr.grid(row=0,column=0,sticky='news',rowspan=2)
for m in self.matrices:
|
self.labboklist = self.parent.labbookSheetsSelector(body)
self.labboklist.grid(row=0,column=1,sticky='news')
bf=Frame(body)
bf.grid(row=1,column=1,sticky='ew')
b=Button(bf,text='Merge into main table', command=lambda: self.mergeTable(main=True))
b.pack(fill=X,expand=1)
balloon.bind(b,'Merge results into main DB table')
b=Button(bf,text='Merge into Selected', command=self.mergeTable)
b.pack(fill=X,expand=1)
balloon.bind(b,'Merge results into an existing labbook table by matching the mutations')
b=Button(bf,text='Create new table', command=self.send2Labbook)
b.pack(fill=X,expand=1)
balloon.bind(b,'Send results to a new sheet in the main labbook')
Button(bf,text='Save as CSV', command=self.saveCSV).pack(fill=X,expand=1)
body.columnconfigure(0,weight=1)
body.rowconfigure(0,weight=1)
return
def showMatrix(self, frame, matrix, label=''):
"""Show matrix in table"""
M = self.matrix2Table(matrix)
mtable = self.showTable(frame, M, label)
return mtable
def showTable(self, frame, model, label=''):
"""Show model in table"""
tf=LabelFrame(frame,text=label)
tf.pack(fill=BOTH,expand=1)
mtable = TableCanvas(tf, model=model, cellwidth=70,
editable=False)
mtable.createTableFrame()
return mtable
def mergeTable(self, main=False):
"""Send a matrix to the peat main table or labbook sheet
by merging matching mutations.
Requires that one field in the table stores compatible
mutant format supported by PEATSA"""
if main == False:
try:
name = self.labboklist.getcurselection()[0]
except:
print 'no name selected'
return
if main == True:
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None: continue
M = self.parent.tablemodel
M = self.mergeMatrix(matrix, M)
self.parent.updateTable()
else:
M = self.DB.getLabbookSheet(name)
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None: continue
M = self.mergeMatrix(matrix, M)
if M != None:
self.DB.createLabbookSheet(name, M)
self.parent.startLabbook('ALL')
return
def send2Labbook(self):
"""Send matrix to selected labbook"""
#get name
cols = ['']+self.DB.getSimpleFields()
DB=self.DB
mpDlg = MultipleValDialog(title='Send to Labbook',
initialvalues=(self.currname, cols),
labels=('table name','exp data column'),
types=('string','list'),
parent=self.mainwin)
if mpDlg.result == False:
return
name = mpDlg.results[0]
expcol = mpDlg.results[1]
M = DBActions.sendDB2Labbook(DB,recs=None,cols=['Mutations',expcol],name=name)
for m in self.matrices:
matrix = self.matrices[m]
if matrix != None:
M = self.mergeMatrix(matrix, M)
self.DB.createLabbookSheet(name, M)
self.parent.startLabbook('ALL')
return
def saveCSV(self):
"""Save matrix to csv"""
filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',
initialdir=os.getcwd(),
filetypes=[("csv","*.csv"),("All files","*.*")])
if not filename:
return
for m in self.matrices:
matrix = self.matrices[m]
if matrix != None:
c=matrix.csvRepresentation()
f=open(filename,'w')
f.write(c)
f.close()
return
def matrix2Table(self, matrix):
"""Creates a table model from a peatsa matrix"""
M = TableModel()
M.addColumn('Mutations')
fields = matrix.columnHeaders()
for f in fields:
M.addColumn(f)
i = matrix.indexOfColumnWithHeader('Mutations')
for row in matrix:
mutationSet = Core.Data.MutationSet(row[i])
code = '+'.join(mutationSet.mutationCodes(reduced=True))
M.addRow(code)
for f in fields:
j = matrix.indexOfColumnWithHeader(f)
if f == 'Mutations':
M.data[code]['Mutations'] = code
else:
M.data[code][f] = str(row[j])
return M
def mergeMatrix(self, matrix, model, fields=None, newfields=None):
"""Merge a peatsa matrix with a table, returns merged tablemodel
tablemodel: input tablemodel
fields: which fields from matrix should be included in merge, default all
newfields: a dict that can map matrix names to new col names
"""
M = self.matrix2Table(matrix)
if fields==None:
fields = M.columnNames
key = 'Mutations'
if not key in model.columnNames:
print 'this table has no mutations column, we cannot merge'
return
i = matrix.indexOfColumnWithHeader(key)
for row in model.reclist:
try:
mset1 = Core.Data.MutationSet(model.data[row][key])
except:
continue
for rec in M.reclist:
try:
mset2 = Core.Data.MutationSet(M.data[rec][key])
except:
continue
if mset1 == mset2:
#add this data to table
for f in fields:
if newfields!=None and newfields.has_key(f):
col = newfields[f]
else:
col = f
if not M.data[rec].has_key(f): continue
model.addColumn(col)
try:
model.data[row][col] = float(M.data[rec][f])
except:
model.data[row][col] = M.data[rec][f]
return model
def showAllResults(self):
"""Show results for single or multiple jobs together"""
names = self.jobstable.get_selectedRecordNames()
if len(names)==1:
ax,mh,x,y=self.showResults()
else:
tx=[]; ty=[]
import pylab as plt
f=plt.figure(figsize=(8,8))
ax=f.add_subplot(111)
for n in names:
a,mh,x,y = self.showResults(n,showtable=False, ax=ax,stats=False)
tx.extend(x)
ty.extend(y)
ax.legend()
#add stats for summary
from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
C.addStats(ax,tx,ty)
f.show()
return
def showResults(self, name=None, showtable=True, ax=None, stats=True):
    """Show results with correlation plot from selected job.
    Fetches the finished job's result matrices, resolves which column holds
    experimental data (job metadata, external project, or user prompt),
    then plots each matrix with a 'Total' column via plotMerged()."""
    job, name = self.getJob(name)
    if job == None:
        print 'job not in DB'
        return
    if job.state() != 'Finished':
        print 'job not finished'
        return
    self.matrices = job.data.allMatrices()
    #print self.matrices['ModellingResults'].csvRepresentation()
    jobmeta = job.metadata()
    cols = self.DB.getSimpleFields()
    expcol = None
    expdata = None
    #print jobmeta
    if jobmeta.has_key('expcol'):
        expcol = jobmeta['expcol']
    if expcol not in cols and jobmeta.has_key('project'):
        #we may have stored the exp data in another project
        prjdata = jobmeta['project']
        print 'trying to loading exp data from external project(s)'
        from PEATDB.Base import PDatabase
        from PEATTables import PEATTableModel
        tmpdb = PDatabase(**prjdata)
        print tmpdb
        S = PEATTableModel(tmpdb)
        expdata = S.simpleCopy(include=['Mutations'])
        print expdata
    #if exp column not known then ask user
    if expcol == '' or expcol == None:
        mpDlg = MultipleValDialog(title='Select Experimental Data',
                                  initialvalues=[cols],
                                  labels=['exp data column:'],
                                  types=['list'],
                                  parent=self.mainwin)
        if mpDlg.result == True:
            expcol = mpDlg.results[0]
        else:
            return
    for m in self.matrices:
        matrix = self.matrices[m]
        # only matrices carrying a 'Total' prediction column are plottable
        if matrix == None or not 'Total' in matrix.columnHeaders():
            continue
        ax,mh,x,y = self.plotMerged(matrix, expcol, expdata, m,
                                    showtable, ax, name, stats)
        #need to add this for mousehandler to work.. hack
        '''from Correlation import MouseHandler
        mh = MouseHandler(ax, labels=expcol, key='Mutations')
        mh.connect()'''
    return ax,mh,x,y
def plotMerged(self, matrix, expcol, expdata=None,
               title='', showtable=True, ax=None, name=None,
               stats=True):
    """Merge a set of exp vals with predictions and plot.
    Returns (axis, mousehandler, x-values, y-values); x is the predicted
    'Total', y the experimental column, both rounded to 2 decimals."""
    if expdata==None:
        expdata = self.parent.tablemodel.simpleCopy(include=['Mutations'])
    merged = self.mergeMatrix(matrix, expdata)
    # rows with empty values in any requested column are dropped
    x,y,names,muts = merged.getColumns(['Total',expcol,'name','Mutations'],allowempty=False)
    from Correlation import CorrelationAnalyser
    C = CorrelationAnalyser()
    muts = ['mutation: '+i for i in muts]
    labels = zip(names, muts)
    ax,frame,mh = C.plotCorrelation(x,y,labels,title=title,ylabel=expcol,
                                    ax=ax,plotname=name,stats=stats,err=4)
    x=[round(float(i),2) for i in x]
    y=[round(float(i),2) for i in y]
    if showtable == True:
        table = self.showTable(frame, merged)
        mh.table = table
    return ax,mh,x,y
def test(self):
    """Ad-hoc developer test: merge stability results into a labbook sheet.
    NOTE(review): getJob may return (None, name); job.error() would then
    raise AttributeError — a guard should be confirmed/added."""
    job, name = self.getJob('myjob')
    if job.error() != None or job.state() != 'Finished':
        return
    stabmatrix = job.data.stabilityResults
    L = self.DB.getLabbookSheet('myjob')
    L = self.mergeMatrix(stabmatrix, L, fields=['name'])
    print L.columnNames
    #L1 = self.DB.getLabbookSheet('myjob3')
    #L.merge(L1)
    return
def displayNoDBWarning(self):
    """Tell the user the plugin cannot start without an open project DB."""
    message = ('No Database is currently open. '
               'You should first open a project.')
    tkMessageBox.showwarning("Cannot launch plugin", message)
    return
def help(self):
    """Open the PEAT_SA wiki page in the user's default web browser."""
    import webbrowser
    url = 'http://enzyme.ucd.ie/main/index.php/PEAT_SA'
    webbrowser.open(url, autoraise=1)
    return
def quit(self, evt=None):
    """Shut down cleanly: restore stdout/stderr, stop the jobManager's
    state-logging thread (it MUST be stopped or it keeps running after the
    window closes), then destroy the plugin window."""
    self.log2Stdout()
    self.jobManager.stopLogging()
    self.mainwin.destroy()
    print 'closing plugin'
    return
def main():
    """Command-line entry point: open a local PEAT DB with -f/--file and
    run the plugin headless (no parent GUI), then run its test method."""
    import os
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="file",
                      help="Open a local db")
    opts, remainder = parser.parse_args()
    #test
    if opts.file != None and os.path.exists(opts.file):
        path=os.path.abspath(opts.file)
        from PEATDB.Base import PDatabase
        DB = PDatabase(local=path)
        P = PEATSAPlugin()
        P.main(DB=DB)
        P.test()

if __name__ == '__main__':
    main()
| if self.matrices[m] != None:
self.showMatrix(fr,self.matrices[m], m) | conditional_block |
PEATSAplugin.py | #!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
try:
from Plugins import Plugin
except:
from PEATDB.Plugins import Plugin
import os, sys, types, copy, pickle
from Tkinter import *
import Pmw
import tkMessageBox, tkSimpleDialog, tkFileDialog
import PEATSA.WebApp.Data
import PEATSA.WebApp.UtilityFunctions
import PEATSA.Core as Core
from PEATDB.Dialogs import MultipleValDialog
from PEATDB.Actions import DBActions
from PEATDB.TableModels import TableModel
from PEATDB.Tables import TableCanvas
class PEATSAPlugin(Plugin):
    """Template GUI plugin for PEAT App"""
    # plugin advertises a GUI to the PEAT plugin loader
    capabilities = ['gui']
    # external package required for this plugin to function
    requires = ['PEATSA']
    # label shown in PEAT's plugin menu
    menuentry = 'PEATSA Plugin'
    # method name -> label for entries in the Options menu
    gui_methods = {'fetchJob':'Fetch Job from Server',
                   'editConfigFile' : 'Configure Server',
                   'help':'Help',
                   'quit':'Close Window'}
    buttonorder = ['createJobDialog','fetchJob','editConfigFile','help','quit']
    about = 'This plugin allows you to call PEATSA'
    # calculation types the PEATSA server accepts
    calctypes = ['stability','binding','pka']
def main(self, parent=None, DB=None):
    """Plugin entry point. Called by PEAT with a parent application, or
    standalone (headless) with an explicit database handle. Returns self
    on success, None when there is nothing to attach to."""
    if parent == None:
        # headless mode: requires an explicit DB
        if DB != None:
            self.DB = DB
            self.setupConnection()
        else:
            return
    else:
        # GUI mode: take the database from the parent application
        self.parent = parent
        self.DB = parent.DB
        if self.DB == None:
            self.displayNoDBWarning()
            return
        self._doFrame()
        self.setupConnection()
        print 'Updating jobs table..'
        self.updateJobs()
    return self
def setupConnection(self):
    """Set up the server connection from ~/peatsa.conf, writing a default
    configuration file first if none exists."""
    homepath = os.path.expanduser("~")
    self.confpath = os.path.join(homepath, 'peatsa.conf')
    if os.path.exists(self.confpath):
        configuration = Core.Environment.Configuration(filename=self.confpath)
    else:
        # no config yet: write defaults and warn the user to configure
        configuration = Core.Environment.Configuration(searchDefaultLocations=False)
        configuration.add_section('DATABASE')
        configuration.set('DATABASE', 'database', 'DBSAInterface')
        configuration.set('DATABASE', 'host', 'enzyme.ucd.ie')
        configuration.set('DATABASE', 'user', 'peatdb')
        configuration.set('DATABASE', 'password', '123')
        configuration.writeToFile(self.confpath)
        # NOTE(review): in headless mode self.parent may be unset here —
        # confirm an attribute default exists on the Plugin base class
        if self.parent != None:
            tkMessageBox.showwarning("Connection Error",
                                     'No PEATSA server configured, press configure server'
                                     ' to set a server, username and password.')
    self.connect(configuration)
    return
def _doFrame(self):
    """Build the plugin's main frame: title, jobs table area, buttons and
    the log window (stdout is redirected into it)."""
    self.mainwin = self.parent.createChildFrame(width=460,title='PEATSA Plugin')
    #self.mainwin = self.parent.create
    methods = self._getmethods()
    # keep only methods that are exposed in the Options menu
    methods = [m for m in methods if m[0] in self.gui_methods.keys()]
    l=Label(self.mainwin, text='PEATSA Interface')
    l.pack(side=TOP,fill=BOTH)
    self.tf=LabelFrame(self.mainwin,text='Project Calculations')
    self.tf.pack(side=TOP,fill=BOTH,expand=1)
    self.manageJobsButtons(self.mainwin)
    self._createButtons(methods)
    self.log = self.createLogWin(self.mainwin)
    self.log.pack(side=TOP,fill=BOTH,expand=1)
    self.stdout2Log()
    # ensure the jobManager is stopped when the frame is destroyed
    self.mainwin.bind("<Destroy>", self.quit)
    #self.parent.sidepane.bind("<Destroy>", self.test1)
    return
def _createButtons(self, methods):
    """Dynamically create buttons for supplied methods, which is a tuple
    of (method name, label): an Options menubutton holding one entry per
    method, plus the main 'Create Calculation' button."""
    mbutton=Menubutton(self.mainwin, text='Options', width=12,
                       borderwidth=2, relief=RIDGE,
                       activeforeground='red')
    menu=Menu(mbutton,tearoff=0)
    mbutton['menu']=menu
    mbutton.pack(side=BOTTOM,fill=BOTH)
    for m in methods:
        # m[0] is the method name (label lookup), m[1] the bound callable
        menu.add_radiobutton(label=self.gui_methods[m[0]],
                             indicatoron=0,
                             command=m[1])
    b=Button(self.mainwin,text='Create Calculation',command=self.createJobDialog)
    b.pack(side=BOTTOM,fill=BOTH)
    return
def updateJobsTable(self):
    """Show table for current jobs list.
    Rebuilds the jobs TableCanvas from the persisted name->jobid mapping,
    annotating each row with state/date read from the local job-state log."""
    self.checkJobsDict()
    jobdict = self.DB.meta.peatsa_jobs
    M = TableModel()
    # open job state log; 'with' guarantees the handle is closed even if
    # unpickling fails (previously the handle leaked on error)
    with open('jobstates.log','r') as f:
        jl = pickle.load(f)
    for j in jobdict:
        jobid = jobdict[j]
        try:
            M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])
        except KeyError:
            # job id absent from the log (or missing fields): mark it
            M.addRecord(j,state='Not in DB')
    self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)
    self.jobstable.createTableFrame()
    self.log.yview('moveto', 1)
    return
def manageJobsButtons(self, parent):
    """Build the two rows of job-management buttons."""
    top = Frame(parent)
    Button(top, text='View Results', command=self.showAllResults,
           bg='#ccFFFF').pack(side=TOP, fill=BOTH, expand=1)
    top.pack(fill=BOTH)
    Button(top, text='Merge Results',
           command=self.mergeCurrent).pack(side=TOP, fill=BOTH, expand=1)
    top.pack(fill=BOTH)
    bottom = Frame(parent)
    shade = '#ADD8E6'
    for caption, action in (('Show Details', self.viewDetails),
                            ('Manage Results', self.manageResults),
                            ('Remove', self.removeJob),
                            ('Resubmit', self.resubmitJob)):
        Button(bottom, text=caption, command=action,
               bg=shade).pack(side=LEFT, fill=BOTH, expand=1)
    bottom.pack(fill=BOTH)
    return
def createLogWin(self, parent):
    """Create and return the Pmw scrolled-text widget used as the log."""
    options = dict(borderframe=1,
                   labelpos='n',
                   label_text='Log',
                   usehullsize=1,
                   hull_width=800,
                   hull_height=200,
                   text_wrap='word')
    return Pmw.ScrolledText(parent, **options)
def stdout2Log(self):
    """Redirect stdout to app control: this object implements write() and
    flush(), so prints appear in the plugin's log widget."""
    sys.stdout = self
    sys.stderr = self
    return
def log2Stdout(self):
    """Return to stdout: restore the interpreter's original streams."""
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    return
def write(self, txt):
    """File-like hook used while stdout is redirected: append txt to the
    log widget and keep the GUI responsive."""
    self.log.appendtext(txt)
    self.log.update_idletasks()
    return
def flush(self):
    """No-op; present so this object satisfies the file protocol."""
    return
def connect(self, configuration):
    """Create the server connection and job manager from a configuration;
    job states are polled into 'jobstates.log' every 60 seconds."""
    self.connection = PEATSA.WebApp.UtilityFunctions.ConnectionFromConfiguration(configuration)
    self.jobManager = PEATSA.WebApp.Data.JobManager(self.connection)
    self.jobManager.setJobStateLogging('jobstates.log',interval=60)
    print '\nConnection to server made sucessfully.\n'
    return
def createMutationList(self, filename=None):
    """Create an empty mutation list object on self.
    NOTE(review): the filename argument is currently ignored — confirm
    whether it should be passed to MutationListFile."""
    self.mutationList = Core.Data.MutationListFile(create=False)
    return
def fetchJob(self):
    """Get job from it's db ID and add to list: prompt for a server-side
    job id plus a local label, then store and refresh the jobs table."""
    mpDlg = MultipleValDialog(title='Get Job',
                              initialvalues=('','my job1'),
                              labels=('ID','Your label',),
                              types=('string','string'),
                              parent=self.mainwin)
    if mpDlg.result == True:
        jobid = mpDlg.results[0]
        name = mpDlg.results[1]
    else:
        return
    job = PEATSA.WebApp.Data.Job(jobid, self.connection)
    if job != None:
        print 'adding job id %s to list' %job.identification
        self.storeJob(name, job)
        self.updateJobs()
    return
def writetempPDB(self,name=None,pdbfile='refprot.pdb'):
    """Write the stored structure lines of a DB record to a local PDB file
    and return the file name; defaults to the reference protein."""
    if name == None:
        name = self.DB.meta.refprotein
    structure_lines = self.DB[name].Structure
    handle = open(pdbfile, 'w')
    for ln in structure_lines:
        handle.write(ln)
    handle.close()
    return pdbfile
def getrefPDBName(self):
    """Return the pdb file stem recorded for the reference protein, or ''
    when the record carries no 'pdbname' field."""
    name = self.DB.meta.refprotein
    if self.DB[name].has_key('pdbname'):
        name = self.DB[name]['pdbname']
        return name.split('.')[0]
    else:
        return ''
def createJobDialog(self):
"""Get details from user using dialog
required: structure, mutations, type of calc and a tag (optional)"""
def | (text):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return 1
if text in self.DB.meta.peatsa_jobs:
return -1
else:
return 1
def close():
jobdlg.destroy()
def loadmuts():
filename=tkFileDialog.askopenfilename(initialdir=os.getcwd(),
filetypes=[("All files","*")])
if filename:
mutlist.importfile(filename)
return
def loadmutsfromDB():
for p in self.DB.getRecs():
mut = self.DB.get(p).Mutations
if mut == None or mut=='':
continue
if type(mut) is types.StringType:
mutlist.appendtext(mut+'\n')
else:
mutstring = mut.getMutationString()
if mutstring != None:
mutlist.appendtext(mutstring+'\n')
return
def getstruct():
filename=tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("pdb","*.pdb"),("All files","*.*")])
pdbentry.setvalue(filename)
return
def getligand():
self.ligandfile = tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("mol2","*.mol2"),("All files","*.*")])
def submit():
#if calcmenu.getcurselection() == 'both':
# calcs = ['stability','binding']
if calcmenu.getcurselection() == 'pka':
calcs = ['scan']
else:
calcs = [calcmenu.getcurselection()]
mutationlist = mutlist.getvalue().split('\n')
mutationlist.remove('')
pdbfile=None; pdb = None
quality = mutqualentry.getvalue()
if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:
tkMessageBox.showinfo('No ref protein',
'Set a reference (wt) protein first')
return
#if self.useref.get() == 1:
#we use ref pdb by default now
pdbfile = self.writetempPDB()
pdbname = self.getrefPDBName()
if len(mutationlist) == 0 or mutationlist==[u'']:
print 'mutation list is empty'
return
if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:
print 'job name already used'
return
name=nameentry.getvalue()
expcol = expcolmenu.getcurselection()
self.submitJob(name=name, pdbname=pdbname,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=mutationlist,
calcs=calcs, mutationquality=quality,
meta={'expcol':expcol,'pdbname':pdbname})
close()
jobdlg = Toplevel()
jobdlg.geometry('+220+220')
jobdlg.title('Create Calculation')
balloon = Pmw.Balloon(jobdlg)
nameentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Name:',
validate = validatename,
value = 'mycalc')
nameentry.pack(fill=BOTH,expand=1)
balloon.bind(nameentry, 'Calculation name can be anything, but should be unique')
expcols = ['']+self.DB.getSimpleFields()
expcolmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Exp. col:',
items = expcols,
initialitem = '',
menubutton_width = 8)
expcolmenu.pack(fill=BOTH,expand=1)
balloon.bind(expcolmenu, 'Field with experimental data to compare, optional')
calcmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Calculation Type:',
items = self.calctypes,
initialitem = 'stability',
menubutton_width = 8)
calcmenu.pack(fill=X,expand=1)
fr=Frame(jobdlg)
fr.pack(fill=X,expand=1)
mutqualentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Quality:',
validate = validatename,
value = '2.0')
mutqualentry.pack(fill=BOTH,expand=1)
Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)
self.ligandfile=None
mutlist = Pmw.ScrolledText(jobdlg,
labelpos = 'n',
label_text='Mutations:',
usehullsize = 1,
hull_width = 200,
hull_height = 250,
text_wrap='word')
mutlist.pack(fill=BOTH,expand=1)
Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)
Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2)
jobdlg.grab_set()
jobdlg.transient(self.parent)
self.parent.wait_window(jobdlg)
return
def submitJob(self, name='mycalc', pdbname=None, pdbfile=None, pdb=None, ligandfile=None,
              mutations=[], calcs=['stability'], mutationquality='2.0', meta={}):
    """Submit job to server: create the job, attach structure, ligand and
    mutation list, mark it Ready, and record it in the PEAT database.
    NOTE(review): mutable default arguments (mutations=[], meta={}) are
    shared across calls — callers should always pass fresh objects."""
    if 'scan' in calcs and pdbname==None:
        print 'You must provide pdb code for pKa calcs'
        return
    if pdb==None and pdbfile==None:
        return
    job = self.jobManager.createJob(pdbId=pdbname, calculations=calcs,
                                    dataTable='Data', metadata=meta,
                                    optionArgs={'--mutationQuality':mutationquality})
    if pdb != None:
        job.setStructure(pdb)
    else:
        job.setStructureFromFile(pdbfile)
    if 'binding' in calcs:
        job.setLigandFromFile(ligandfile)
    self.mutationList = Core.Data.MutationListFile(filename='tempmutlist', create=True)
    sets=[]
    for code in mutations:
        if code == '': continue
        try:
            sets.append(Core.Data.MutationSet(code))
        except:
            # unparsable codes are reported and skipped, not fatal
            print 'mutation code %s incorrect' %code
    for s in sets:
        self.mutationList.addMutant(s, autoUpdate=False, ignoreDuplicates=True)
    self.mutationList.removeDuplicates(autoUpdate=False)
    job.setMutationListFile(self.mutationList)
    job.setState('Ready')
    self.jobManager.logJobStates('jobstates.log')
    #add job to peat database
    self.storeJob(name, job)
    if self.parent != None:
        username = self.parent.username
        self.updateJobs()
    else:
        username = None
    self.DB.commit(note='peatsa job',user=username)
    print 'job submitted successfully'
    return
def resubmitJob(self):
"""Resend a job based on new mutations in DB that are not in job already"""
job, name = self.getJob()
if job == None:
return
DB=self.DB
self.matrices = job.data.allMatrices()
for m in matrices:
matrix=matrices[m]
if matrix==None: return
muts = matrix.mutationCodes()
dbmuts = [DB.get(p).Mutations for p in DB.getRecs()]
newmuts = list(set(dbmuts) - set(muts))
print 'the following mutations in the project are not in the job: %s' %newmuts
'''self.submitJob(name=name,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=newmuts,
calcs=calcs, meta={'expcol':expcol}) '''
self.log.yview('moveto', 1)
return
def getJob(self, name=None):
    """Get job from name; defaults to the row selected in the jobs table.
    Returns (job, name), with job None when it cannot be retrieved."""
    if name == None:
        name = self.jobstable.get_selectedRecordNames()[0]
        if name == None:
            return None, name
    jobid = self.DB.meta.peatsa_jobs[name]
    try:
        job = PEATSA.WebApp.Data.Job(jobid, self.connection)
    except:
        #print 'job not in database'
        return None,name
    return job, name
def removeJob(self):
    """Remove a job from the db (after confirmation); if it is already
    gone from the server, still drop it from the local mapping."""
    job, name = self.getJob()
    answer = tkMessageBox.askyesno("Warning",'Remove this job?')
    if answer == False:
        return
    try:
        self.jobManager.deleteJob(job)
    except:
        print 'job not in database, removing from peat'
    del self.DB.meta.peatsa_jobs[name]
    # flag the persistent mapping as changed so ZODB saves it
    self.DB.meta.__p__changed = 1
    self.updateJobs()
    return
def viewDetails(self, name=None):
    """Print a summary of the selected job (state, date, pdb, mutation
    count, errors) to the log window.
    NOTE(review): the name argument is ignored; getJob() is called with no
    argument and uses the current table selection — confirm intent."""
    job, name = self.getJob()
    if job==None:
        return
    jobmeta = job.metadata()
    print
    print job.data
    print 'details for job %s' %name
    print 'job status:',job.state()
    print 'submitted on ',job.date
    if jobmeta.has_key('pdbname'):
        print 'original pdb file:', jobmeta['pdbname']
    print 'mutations:', len(job.mutationListFile().mutantList())
    print '(this job has id %s)' %job.identification
    if job.error() != None:
        print 'The job had an error..'
        print job.error()['ErrorDescription']
        print job.error()['DetailedDescription']
    print
    self.log.yview('moveto', 1)
    return
def addColoredText(self, st, tag, word, fg='black', bg='white'):
    """Insert word (plus a trailing space) at the end of text widget st
    and colour exactly that span using the given tag."""
    word = word + " "
    st.insert('end', word)
    end_index = st.index('end')
    # back up over the inserted word (+1 for the widget's final newline)
    begin_index = "%s-%sc" % (end_index, len(word) + 1)
    st.tag_add(tag, begin_index, end_index)
    st.tag_config(tag, foreground=fg, background=bg)
    return
def checkJobsDict(self):
    """Check jobs data structure exists; lazily create the persistent
    name->jobid mapping on first use."""
    if not hasattr(self.DB.meta,'peatsa_jobs'):
        from ZODB.PersistentMapping import PersistentMapping
        self.DB.meta.peatsa_jobs = PersistentMapping()
def storeJob(self, name, job):
    """Store job to DB: only the server-side identifier is persisted
    under the user-chosen name, not the job object itself."""
    self.checkJobsDict()
    self.DB.meta.peatsa_jobs[name] = job.identification
    return
def updateJobs(self):
    """Refresh the jobs table and reschedule itself every 60 seconds.
    The Tk after-id is kept on self.wait so the timer could be cancelled."""
    if not hasattr(self.DB.meta,'peatsa_jobs'):
        return
    self.updateJobsTable()
    self.wait=self.mainwin.after(60000, self.updateJobs)
    return
def mergeResults(self, job, colname, tablemodel):
    """Merge given job results to tablemodel: each matrix's 'Total' column
    is merged in under the new name colname (tablemodel mutated in place)."""
    if job==None:
        return
    matrices = job.data.allMatrices()
    if not colname:
        return
    # rename map: matrix column 'Total' -> user-chosen column name
    nf={'Total':colname}
    for m in matrices:
        matrix = matrices[m]
        if matrix == None: continue
        M = self.mergeMatrix(matrix, tablemodel, fields=['Total'], newfields=nf)
    return
def mergeCurrent(self):
    """Auto merge selected job results to main table
    called from GUI. Prompts for the destination column name; cancelling
    the prompt makes mergeResults() a no-op."""
    job, name = self.getJob()
    if job==None:
        return
    #get field name to use
    colname = tkSimpleDialog.askstring("Column name?",
                                       "Name for column:",
                                       initialvalue=name+'_Predictions',
                                       parent=self.mainwin)
    M = self.parent.tablemodel
    self.mergeResults(job, colname, M)
    self.parent.updateTable()
    #also send some meta data to peatsa_meta?
    '''from Correlation import CorrelationAnalyser
    C = CorrelationAnalyser()
    cc,rmse = C.getStats(pre,exp)
    data.append({'name':p,'rmse':rmse,'cc':cc}) '''
    return
def manageResults(self, name=None):
"""Get the results back - we can send the matrix to the main peat
table or put results into a labbook sheet.
Also allow user to merge with an existing table"""
job, name = self.getJob(name)
if job.error() != None:
print 'job had an error, use view details'
elif job.state() == 'Finished':
self.showPEATSAResultsDialog(job, name)
else:
print 'Job is not finished yet.'
return
def editConfigFile(self):
    """Edit config file in a text window; when it closes, reload the
    configuration and reconnect to the server."""
    from PEATDB.textFrame import textFrame
    tf = textFrame(parent=self.mainwin,
                   title='PEATSA Conf file')
    tf.load_from_file(self.confpath)
    # block until the editor window is closed
    self.parent.wait_window(tf.frame)
    #reconnect
    configuration = Core.Environment.Configuration(filename=self.confpath)
    self.connect(configuration)
    return
def showPEATSAResultsDialog(self, job, name):
    """Dialog showing every non-empty result matrix of a job, a labbook
    selector, and merge/export actions."""
    resdlg = Toplevel()
    resdlg.geometry('600x450+300+200')
    resdlg.title('PEATSA results '+name)
    balloon = Pmw.Balloon(resdlg)
    # remembered for send2Labbook's default sheet name
    self.currname = name
    body = Frame(resdlg)
    resdlg.initial_focus = body
    body.pack(fill=BOTH,expand=1,padx=5, pady=5)
    self.matrices = job.data.allMatrices()
    fr=Frame(body)
    fr.grid(row=0,column=0,sticky='news',rowspan=2)
    # one embedded table per non-empty result matrix
    for m in self.matrices:
        if self.matrices[m] != None:
            self.showMatrix(fr,self.matrices[m], m)
    self.labboklist = self.parent.labbookSheetsSelector(body)
    self.labboklist.grid(row=0,column=1,sticky='news')
    bf=Frame(body)
    bf.grid(row=1,column=1,sticky='ew')
    b=Button(bf,text='Merge into main table', command=lambda: self.mergeTable(main=True))
    b.pack(fill=X,expand=1)
    balloon.bind(b,'Merge results into main DB table')
    b=Button(bf,text='Merge into Selected', command=self.mergeTable)
    b.pack(fill=X,expand=1)
    balloon.bind(b,'Merge results into an existing labbook table by matching the mutations')
    b=Button(bf,text='Create new table', command=self.send2Labbook)
    b.pack(fill=X,expand=1)
    balloon.bind(b,'Send results to a new sheet in the main labbook')
    Button(bf,text='Save as CSV', command=self.saveCSV).pack(fill=X,expand=1)
    body.columnconfigure(0,weight=1)
    body.rowconfigure(0,weight=1)
    return
def showMatrix(self, frame, matrix, label=''):
    """Convert a PEATSA matrix to a table model and display it in frame."""
    model = self.matrix2Table(matrix)
    return self.showTable(frame, model, label)
def showTable(self, frame, model, label=''):
    """Display a table model inside a labelled frame; return the table."""
    container = LabelFrame(frame, text=label)
    container.pack(fill=BOTH, expand=1)
    table = TableCanvas(container, model=model, cellwidth=70,
                        editable=False)
    table.createTableFrame()
    return table
def mergeTable(self, main=False):
    """Send a matrix to the peat main table or labbook sheet
    by merging matching mutations.
    Requires that one field in the table stores compatible
    mutant format supported by PEATSA"""
    if main == False:
        # need a labbook sheet selection when not merging into main table
        try:
            name = self.labboklist.getcurselection()[0]
        except:
            print 'no name selected'
            return
    if main == True:
        for m in self.matrices:
            matrix = self.matrices[m]
            if matrix == None: continue
            M = self.parent.tablemodel
            M = self.mergeMatrix(matrix, M)
        self.parent.updateTable()
    else:
        M = self.DB.getLabbookSheet(name)
        for m in self.matrices:
            matrix = self.matrices[m]
            if matrix == None: continue
            M = self.mergeMatrix(matrix, M)
        if M != None:
            self.DB.createLabbookSheet(name, M)
            self.parent.startLabbook('ALL')
    return
def send2Labbook(self):
    """Send matrix to selected labbook: create a new sheet seeded with
    Mutations plus a chosen exp column, then merge all result matrices."""
    #get name
    cols = ['']+self.DB.getSimpleFields()
    DB=self.DB
    mpDlg = MultipleValDialog(title='Send to Labbook',
                              initialvalues=(self.currname, cols),
                              labels=('table name','exp data column'),
                              types=('string','list'),
                              parent=self.mainwin)
    if mpDlg.result == False:
        return
    name = mpDlg.results[0]
    expcol = mpDlg.results[1]
    M = DBActions.sendDB2Labbook(DB,recs=None,cols=['Mutations',expcol],name=name)
    for m in self.matrices:
        matrix = self.matrices[m]
        if matrix != None:
            M = self.mergeMatrix(matrix, M)
    self.DB.createLabbookSheet(name, M)
    self.parent.startLabbook('ALL')
    return
def saveCSV(self):
    """Save matrix to csv"""
    filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',
                                            initialdir=os.getcwd(),
                                            filetypes=[("csv","*.csv"),("All files","*.*")])
    if not filename:
        return
    for m in self.matrices:
        matrix = self.matrices[m]
        if matrix != None:
            # NOTE(review): the file is reopened in 'w' mode on every
            # iteration, so only the last non-empty matrix survives —
            # confirm whether all matrices should be written/appended
            c=matrix.csvRepresentation()
            f=open(filename,'w')
            f.write(c)
            f.close()
    return
def matrix2Table(self, matrix):
    """Creates a table model from a peatsa matrix; the reduced mutation
    code of each row becomes the row key, all other values are strings."""
    M = TableModel()
    M.addColumn('Mutations')
    fields = matrix.columnHeaders()
    for f in fields:
        M.addColumn(f)
    i = matrix.indexOfColumnWithHeader('Mutations')
    for row in matrix:
        mutationSet = Core.Data.MutationSet(row[i])
        # canonical '+'-joined reduced code used as the record name
        code = '+'.join(mutationSet.mutationCodes(reduced=True))
        M.addRow(code)
        for f in fields:
            j = matrix.indexOfColumnWithHeader(f)
            if f == 'Mutations':
                M.data[code]['Mutations'] = code
            else:
                M.data[code][f] = str(row[j])
    return M
def mergeMatrix(self, matrix, model, fields=None, newfields=None):
    """Merge a peatsa matrix with a table, returns merged tablemodel
    tablemodel: input tablemodel
    fields: which fields from matrix should be included in merge, default all
    newfields: a dict that can map matrix names to new col names
    """
    M = self.matrix2Table(matrix)
    if fields==None:
        fields = M.columnNames
    key = 'Mutations'
    if not key in model.columnNames:
        print 'this table has no mutations column, we cannot merge'
        return
    i = matrix.indexOfColumnWithHeader(key)
    # O(rows*recs) scan: rows match when their parsed mutation sets are equal
    for row in model.reclist:
        try:
            mset1 = Core.Data.MutationSet(model.data[row][key])
        except:
            continue
        for rec in M.reclist:
            try:
                mset2 = Core.Data.MutationSet(M.data[rec][key])
            except:
                continue
            if mset1 == mset2:
                #add this data to table
                for f in fields:
                    if newfields!=None and newfields.has_key(f):
                        col = newfields[f]
                    else:
                        col = f
                    if not M.data[rec].has_key(f): continue
                    model.addColumn(col)
                    # store numerically when possible, else the raw string
                    try:
                        model.data[row][col] = float(M.data[rec][f])
                    except:
                        model.data[row][col] = M.data[rec][f]
    return model
def showAllResults(self):
    """Show results for single or multiple jobs together.
    With one selected job, delegates to showResults(); with several,
    overlays all jobs on one shared matplotlib axis and adds summary stats."""
    names = self.jobstable.get_selectedRecordNames()
    if len(names)==1:
        ax,mh,x,y=self.showResults()
    else:
        # accumulate every job's x/y points for the combined statistics
        tx=[]; ty=[]
        import pylab as plt
        f=plt.figure(figsize=(8,8))
        ax=f.add_subplot(111)
        for n in names:
            a,mh,x,y = self.showResults(n,showtable=False, ax=ax,stats=False)
            tx.extend(x)
            ty.extend(y)
        ax.legend()
        #add stats for summary
        from Correlation import CorrelationAnalyser
        C = CorrelationAnalyser()
        C.addStats(ax,tx,ty)
        f.show()
    return
def showResults(self, name=None, showtable=True, ax=None, stats=True):
    """Show results with correlation plot from selected job.
    Fetches the finished job's result matrices, resolves which column holds
    experimental data (job metadata, external project, or user prompt),
    then plots each matrix with a 'Total' column via plotMerged()."""
    job, name = self.getJob(name)
    if job == None:
        print 'job not in DB'
        return
    if job.state() != 'Finished':
        print 'job not finished'
        return
    self.matrices = job.data.allMatrices()
    #print self.matrices['ModellingResults'].csvRepresentation()
    jobmeta = job.metadata()
    cols = self.DB.getSimpleFields()
    expcol = None
    expdata = None
    #print jobmeta
    if jobmeta.has_key('expcol'):
        expcol = jobmeta['expcol']
    if expcol not in cols and jobmeta.has_key('project'):
        #we may have stored the exp data in another project
        prjdata = jobmeta['project']
        print 'trying to loading exp data from external project(s)'
        from PEATDB.Base import PDatabase
        from PEATTables import PEATTableModel
        tmpdb = PDatabase(**prjdata)
        print tmpdb
        S = PEATTableModel(tmpdb)
        expdata = S.simpleCopy(include=['Mutations'])
        print expdata
    #if exp column not known then ask user
    if expcol == '' or expcol == None:
        mpDlg = MultipleValDialog(title='Select Experimental Data',
                                  initialvalues=[cols],
                                  labels=['exp data column:'],
                                  types=['list'],
                                  parent=self.mainwin)
        if mpDlg.result == True:
            expcol = mpDlg.results[0]
        else:
            return
    for m in self.matrices:
        matrix = self.matrices[m]
        # only matrices carrying a 'Total' prediction column are plottable
        if matrix == None or not 'Total' in matrix.columnHeaders():
            continue
        ax,mh,x,y = self.plotMerged(matrix, expcol, expdata, m,
                                    showtable, ax, name, stats)
        #need to add this for mousehandler to work.. hack
        '''from Correlation import MouseHandler
        mh = MouseHandler(ax, labels=expcol, key='Mutations')
        mh.connect()'''
    return ax,mh,x,y
def plotMerged(self, matrix, expcol, expdata=None,
               title='', showtable=True, ax=None, name=None,
               stats=True):
    """Merge a set of exp vals with predictions and plot.
    Returns (axis, mousehandler, x-values, y-values); x is the predicted
    'Total', y the experimental column, both rounded to 2 decimals."""
    if expdata==None:
        expdata = self.parent.tablemodel.simpleCopy(include=['Mutations'])
    merged = self.mergeMatrix(matrix, expdata)
    # rows with empty values in any requested column are dropped
    x,y,names,muts = merged.getColumns(['Total',expcol,'name','Mutations'],allowempty=False)
    from Correlation import CorrelationAnalyser
    C = CorrelationAnalyser()
    muts = ['mutation: '+i for i in muts]
    labels = zip(names, muts)
    ax,frame,mh = C.plotCorrelation(x,y,labels,title=title,ylabel=expcol,
                                    ax=ax,plotname=name,stats=stats,err=4)
    x=[round(float(i),2) for i in x]
    y=[round(float(i),2) for i in y]
    if showtable == True:
        table = self.showTable(frame, merged)
        mh.table = table
    return ax,mh,x,y
def test(self):
    """Ad-hoc developer test: merge stability results into a labbook sheet.
    NOTE(review): getJob may return (None, name); job.error() would then
    raise AttributeError — a guard should be confirmed/added."""
    job, name = self.getJob('myjob')
    if job.error() != None or job.state() != 'Finished':
        return
    stabmatrix = job.data.stabilityResults
    L = self.DB.getLabbookSheet('myjob')
    L = self.mergeMatrix(stabmatrix, L, fields=['name'])
    print L.columnNames
    #L1 = self.DB.getLabbookSheet('myjob3')
    #L.merge(L1)
    return
def displayNoDBWarning(self):
    """Warn user that no DB is present; the plugin needs an open project."""
    tkMessageBox.showwarning("Cannot launch plugin",
                             'No Database is currently open. '
                             'You should first open a project.')
    return
def help(self):
    """Open the PEAT_SA documentation wiki in the default web browser."""
    import webbrowser
    link='http://enzyme.ucd.ie/main/index.php/PEAT_SA'
    webbrowser.open(link,autoraise=1)
    return
def quit(self, evt=None):
    """Shut down cleanly: restore stdout/stderr, stop the jobManager's
    state-logging thread (it MUST be stopped or it keeps running after the
    window closes), then destroy the plugin window."""
    self.log2Stdout()
    self.jobManager.stopLogging()
    self.mainwin.destroy()
    print 'closing plugin'
    return
def main():
    """Command-line entry point: open a local PEAT DB with -f/--file and
    run the plugin headless (no parent GUI), then run its test method."""
    import os
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="file",
                      help="Open a local db")
    opts, remainder = parser.parse_args()
    #test
    if opts.file != None and os.path.exists(opts.file):
        path=os.path.abspath(opts.file)
        from PEATDB.Base import PDatabase
        DB = PDatabase(local=path)
        P = PEATSAPlugin()
        P.main(DB=DB)
        P.test()

if __name__ == '__main__':
    main()
| validatename | identifier_name |
PEATSAplugin.py | #!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
try:
from Plugins import Plugin
except:
from PEATDB.Plugins import Plugin
import os, types, copy, pickle
from Tkinter import *
import Pmw
import PEATSA.WebApp.Data
import PEATSA.WebApp.UtilityFunctions
import PEATSA.Core as Core
from PEATDB.Dialogs import MultipleValDialog
from PEATDB.Actions import DBActions
from PEATDB.TableModels import TableModel
from PEATDB.Tables import TableCanvas
import tkMessageBox, tkSimpleDialog, tkFileDialog
class PEATSAPlugin(Plugin):
"""Template GUI plugin for PEAT App"""
capabilities = ['gui']
requires = ['PEATSA']
menuentry = 'PEATSA Plugin'
gui_methods = {'fetchJob':'Fetch Job from Server',
'editConfigFile' : 'Configure Server',
'help':'Help',
'quit':'Close Window'}
buttonorder = ['createJobDialog','fetchJob','editConfigFile','help','quit']
about = 'This plugin allows you to call PEATSA'
calctypes = ['stability','binding','pka']
def main(self, parent=None, DB=None):
    """Plugin entry point. Called by PEAT with a parent application, or
    standalone (headless) with an explicit database handle. Returns self
    on success, None when there is nothing to attach to."""
    if parent == None:
        # headless mode: requires an explicit DB
        if DB != None:
            self.DB = DB
            self.setupConnection()
        else:
            return
    else:
        # GUI mode: take the database from the parent application
        self.parent = parent
        self.DB = parent.DB
        if self.DB == None:
            self.displayNoDBWarning()
            return
        self._doFrame()
        self.setupConnection()
        print 'Updating jobs table..'
        self.updateJobs()
    return self
def setupConnection(self):
    """Set up the server connection from ~/peatsa.conf, writing a default
    configuration file first if none exists."""
    homepath = os.path.expanduser("~")
    self.confpath = os.path.join(homepath, 'peatsa.conf')
    if os.path.exists(self.confpath):
        configuration = Core.Environment.Configuration(filename=self.confpath)
    else:
        # no config yet: write defaults and warn the user to configure
        configuration = Core.Environment.Configuration(searchDefaultLocations=False)
        configuration.add_section('DATABASE')
        configuration.set('DATABASE', 'database', 'DBSAInterface')
        configuration.set('DATABASE', 'host', 'enzyme.ucd.ie')
        configuration.set('DATABASE', 'user', 'peatdb')
        configuration.set('DATABASE', 'password', '123')
        configuration.writeToFile(self.confpath)
        # NOTE(review): in headless mode self.parent may be unset here —
        # confirm an attribute default exists on the Plugin base class
        if self.parent != None:
            tkMessageBox.showwarning("Connection Error",
                                     'No PEATSA server configured, press configure server'
                                     ' to set a server, username and password.')
    self.connect(configuration)
    return
def _doFrame(self):
    """Build the plugin's main frame: title, jobs table area, buttons and
    the log window (stdout is redirected into it)."""
    self.mainwin = self.parent.createChildFrame(width=460,title='PEATSA Plugin')
    #self.mainwin = self.parent.create
    methods = self._getmethods()
    # keep only methods that are exposed in the Options menu
    methods = [m for m in methods if m[0] in self.gui_methods.keys()]
    l=Label(self.mainwin, text='PEATSA Interface')
    l.pack(side=TOP,fill=BOTH)
    self.tf=LabelFrame(self.mainwin,text='Project Calculations')
    self.tf.pack(side=TOP,fill=BOTH,expand=1)
    self.manageJobsButtons(self.mainwin)
    self._createButtons(methods)
    self.log = self.createLogWin(self.mainwin)
    self.log.pack(side=TOP,fill=BOTH,expand=1)
    self.stdout2Log()
    # ensure the jobManager is stopped when the frame is destroyed
    self.mainwin.bind("<Destroy>", self.quit)
    #self.parent.sidepane.bind("<Destroy>", self.test1)
    return
def _createButtons(self, methods):
"""Dynamically create buttons for supplied methods, which is a tuple
of (method name, label)"""
mbutton=Menubutton(self.mainwin, text='Options', width=12,
borderwidth=2, relief=RIDGE,
activeforeground='red')
menu=Menu(mbutton,tearoff=0)
mbutton['menu']=menu
mbutton.pack(side=BOTTOM,fill=BOTH)
for m in methods:
menu.add_radiobutton(label=self.gui_methods[m[0]],
indicatoron=0,
command=m[1])
b=Button(self.mainwin,text='Create Calculation',command=self.createJobDialog)
b.pack(side=BOTTOM,fill=BOTH)
return
def updateJobsTable(self):
"""Show table for current jobs list"""
self.checkJobsDict()
jobdict = self.DB.meta.peatsa_jobs
M = TableModel()
#open job log from file
f=open('jobstates.log','r')
jl = pickle.load(f)
for j in jobdict:
jobid = jobdict[j]
try:
M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])
except:
M.addRecord(j,state='Not in DB')
self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)
self.jobstable.createTableFrame()
self.log.yview('moveto', 1)
f.close()
return |
def manageJobsButtons(self, parent):
fr1 = Frame(parent)
Button(fr1,text='View Results',command=self.showAllResults,bg='#ccFFFF').pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
Button(fr1,text='Merge Results',command=self.mergeCurrent).pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
fr = Frame(parent)
c='#ADD8E6'
Button(fr,text='Show Details',command=self.viewDetails,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Manage Results',command=self.manageResults,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Remove',command=self.removeJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Resubmit',command=self.resubmitJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
fr.pack(fill=BOTH)
return
def createLogWin(self, parent):
log = Pmw.ScrolledText(parent,
borderframe=1,
labelpos = 'n',
label_text='Log',
usehullsize = 1,
hull_width = 800,
hull_height = 200,
text_wrap='word')
return log
def stdout2Log(self):
"""Redirect stdout to app control"""
sys.stdout = self
sys.stderr = self
return
def log2Stdout(self):
"""return to stdout"""
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
return
def write(self, txt):
"""Handle stdout if required"""
self.log.appendtext(txt)
self.log.update_idletasks()
return
def flush(self):
return
def connect(self, configuration):
"""Create connection"""
self.connection = PEATSA.WebApp.UtilityFunctions.ConnectionFromConfiguration(configuration)
self.jobManager = PEATSA.WebApp.Data.JobManager(self.connection)
self.jobManager.setJobStateLogging('jobstates.log',interval=60)
print '\nConnection to server made sucessfully.\n'
return
def createMutationList(self, filename=None):
self.mutationList = Core.Data.MutationListFile(create=False)
return
def fetchJob(self):
"""Get job from it's db ID and add to list"""
mpDlg = MultipleValDialog(title='Get Job',
initialvalues=('','my job1'),
labels=('ID','Your label',),
types=('string','string'),
parent=self.mainwin)
if mpDlg.result == True:
jobid = mpDlg.results[0]
name = mpDlg.results[1]
else:
return
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
if job != None:
print 'adding job id %s to list' %job.identification
self.storeJob(name, job)
self.updateJobs()
return
def writetempPDB(self,name=None,pdbfile='refprot.pdb'):
if name==None:
name = self.DB.meta.refprotein
pdblines = self.DB[name].Structure
#pdbfile = 'refprot.pdb'
fd=open(pdbfile,'w')
for line in pdblines:
fd.write(line)
fd.close()
return pdbfile
def getrefPDBName(self):
name = self.DB.meta.refprotein
if self.DB[name].has_key('pdbname'):
name = self.DB[name]['pdbname']
return name.split('.')[0]
else:
return ''
def createJobDialog(self):
"""Get details from user using dialog
required: structure, mutations, type of calc and a tag (optional)"""
def validatename(text):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return 1
if text in self.DB.meta.peatsa_jobs:
return -1
else:
return 1
def close():
jobdlg.destroy()
def loadmuts():
filename=tkFileDialog.askopenfilename(initialdir=os.getcwd(),
filetypes=[("All files","*")])
if filename:
mutlist.importfile(filename)
return
def loadmutsfromDB():
for p in self.DB.getRecs():
mut = self.DB.get(p).Mutations
if mut == None or mut=='':
continue
if type(mut) is types.StringType:
mutlist.appendtext(mut+'\n')
else:
mutstring = mut.getMutationString()
if mutstring != None:
mutlist.appendtext(mutstring+'\n')
return
def getstruct():
filename=tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("pdb","*.pdb"),("All files","*.*")])
pdbentry.setvalue(filename)
return
def getligand():
self.ligandfile = tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("mol2","*.mol2"),("All files","*.*")])
def submit():
#if calcmenu.getcurselection() == 'both':
# calcs = ['stability','binding']
if calcmenu.getcurselection() == 'pka':
calcs = ['scan']
else:
calcs = [calcmenu.getcurselection()]
mutationlist = mutlist.getvalue().split('\n')
mutationlist.remove('')
pdbfile=None; pdb = None
quality = mutqualentry.getvalue()
if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:
tkMessageBox.showinfo('No ref protein',
'Set a reference (wt) protein first')
return
#if self.useref.get() == 1:
#we use ref pdb by default now
pdbfile = self.writetempPDB()
pdbname = self.getrefPDBName()
if len(mutationlist) == 0 or mutationlist==[u'']:
print 'mutation list is empty'
return
if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:
print 'job name already used'
return
name=nameentry.getvalue()
expcol = expcolmenu.getcurselection()
self.submitJob(name=name, pdbname=pdbname,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=mutationlist,
calcs=calcs, mutationquality=quality,
meta={'expcol':expcol,'pdbname':pdbname})
close()
jobdlg = Toplevel()
jobdlg.geometry('+220+220')
jobdlg.title('Create Calculation')
balloon = Pmw.Balloon(jobdlg)
nameentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Name:',
validate = validatename,
value = 'mycalc')
nameentry.pack(fill=BOTH,expand=1)
balloon.bind(nameentry, 'Calculation name can be anything, but should be unique')
expcols = ['']+self.DB.getSimpleFields()
expcolmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Exp. col:',
items = expcols,
initialitem = '',
menubutton_width = 8)
expcolmenu.pack(fill=BOTH,expand=1)
balloon.bind(expcolmenu, 'Field with experimental data to compare, optional')
calcmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Calculation Type:',
items = self.calctypes,
initialitem = 'stability',
menubutton_width = 8)
calcmenu.pack(fill=X,expand=1)
fr=Frame(jobdlg)
fr.pack(fill=X,expand=1)
mutqualentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Quality:',
validate = validatename,
value = '2.0')
mutqualentry.pack(fill=BOTH,expand=1)
Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)
self.ligandfile=None
mutlist = Pmw.ScrolledText(jobdlg,
labelpos = 'n',
label_text='Mutations:',
usehullsize = 1,
hull_width = 200,
hull_height = 250,
text_wrap='word')
mutlist.pack(fill=BOTH,expand=1)
Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)
Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2)
jobdlg.grab_set()
jobdlg.transient(self.parent)
self.parent.wait_window(jobdlg)
return
def submitJob(self, name='mycalc', pdbname=None, pdb=None, pdbfile=None, ligandfile=None,
mutations=[], calcs=['stability'], mutationquality='2.0', meta={}):
"""Submit job to server"""
if 'scan' in calcs and pdbname==None:
print 'You must provide pdb code for pKa calcs'
return
if pdb==None and pdbfile==None:
return
job = self.jobManager.createJob(pdbId=pdbname, calculations=calcs,
dataTable='Data', metadata=meta,
optionArgs={'--mutationQuality':mutationquality})
if pdb != None:
job.setStructure(pdb)
else:
job.setStructureFromFile(pdbfile)
if 'binding' in calcs:
job.setLigandFromFile(ligandfile)
self.mutationList = Core.Data.MutationListFile(filename='tempmutlist', create=True)
sets=[]
for code in mutations:
if code == '': continue
try:
sets.append(Core.Data.MutationSet(code))
except:
print 'mutation code %s incorrect' %code
for s in sets:
self.mutationList.addMutant(s, autoUpdate=False, ignoreDuplicates=True)
self.mutationList.removeDuplicates(autoUpdate=False)
job.setMutationListFile(self.mutationList)
job.setState('Ready')
self.jobManager.logJobStates('jobstates.log')
#add job to peat database
self.storeJob(name, job)
if self.parent != None:
username = self.parent.username
self.updateJobs()
else:
username = None
self.DB.commit(note='peatsa job',user=username)
print 'job submitted successfully'
return
def resubmitJob(self):
"""Resend a job based on new mutations in DB that are not in job already"""
job, name = self.getJob()
if job == None:
return
DB=self.DB
self.matrices = job.data.allMatrices()
for m in matrices:
matrix=matrices[m]
if matrix==None: return
muts = matrix.mutationCodes()
dbmuts = [DB.get(p).Mutations for p in DB.getRecs()]
newmuts = list(set(dbmuts) - set(muts))
print 'the following mutations in the project are not in the job: %s' %newmuts
'''self.submitJob(name=name,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=newmuts,
calcs=calcs, meta={'expcol':expcol}) '''
self.log.yview('moveto', 1)
return
def getJob(self, name=None):
"""Get job from name"""
if name == None:
name = self.jobstable.get_selectedRecordNames()[0]
if name == None:
return None, name
jobid = self.DB.meta.peatsa_jobs[name]
try:
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
except:
#print 'job not in database'
return None,name
return job, name
def removeJob(self):
"""Remove a job from the db"""
job, name = self.getJob()
answer = tkMessageBox.askyesno("Warning",'Remove this job?')
if answer == False:
return
try:
self.jobManager.deleteJob(job)
except:
print 'job not in database, removing from peat'
del self.DB.meta.peatsa_jobs[name]
self.DB.meta.__p__changed = 1
self.updateJobs()
return
def viewDetails(self, name=None):
job, name = self.getJob()
if job==None:
return
jobmeta = job.metadata()
print
print job.data
print 'details for job %s' %name
print 'job status:',job.state()
print 'submitted on ',job.date
if jobmeta.has_key('pdbname'):
print 'original pdb file:', jobmeta['pdbname']
print 'mutations:', len(job.mutationListFile().mutantList())
print '(this job has id %s)' %job.identification
if job.error() != None:
print 'The job had an error..'
print job.error()['ErrorDescription']
print job.error()['DetailedDescription']
print
self.log.yview('moveto', 1)
return
def addColoredText(self, st, tag, word, fg='black', bg='white'):
"""add a space to the end of the word"""
word = word + " "
st.insert('end', word)
end_index = st.index('end')
begin_index = "%s-%sc" % (end_index, len(word) + 1)
st.tag_add(tag, begin_index, end_index)
st.tag_config(tag, foreground=fg, background=bg)
return
def checkJobsDict(self):
"""Check jobs data structure exists"""
if not hasattr(self.DB.meta,'peatsa_jobs'):
from ZODB.PersistentMapping import PersistentMapping
self.DB.meta.peatsa_jobs = PersistentMapping()
def storeJob(self, name, job):
"""Store job to DB"""
self.checkJobsDict()
self.DB.meta.peatsa_jobs[name] = job.identification
return
def updateJobs(self):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return
self.updateJobsTable()
self.wait=self.mainwin.after(60000, self.updateJobs)
return
def mergeResults(self, job, colname, tablemodel):
"""Merge given job results to tablemodel"""
if job==None:
return
matrices = job.data.allMatrices()
if not colname:
return
nf={'Total':colname}
for m in matrices:
matrix = matrices[m]
if matrix == None: continue
M = self.mergeMatrix(matrix, tablemodel, fields=['Total'], newfields=nf)
return
def mergeCurrent(self):
"""Auto merge selected job results to main table
called from GUI """
job, name = self.getJob()
if job==None:
return
#get field name to use
colname = tkSimpleDialog.askstring("Column name?",
"Name for column:",
initialvalue=name+'_Predictions',
parent=self.mainwin)
M = self.parent.tablemodel
self.mergeResults(job, colname, M)
self.parent.updateTable()
#also send some meta data to peatsa_meta?
'''from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
cc,rmse = C.getStats(pre,exp)
data.append({'name':p,'rmse':rmse,'cc':cc}) '''
return
def manageResults(self, name=None):
"""Get the results back - we can send the matrix to the main peat
table or put results into a labbook sheet.
Also allow user to merge with an existing table"""
job, name = self.getJob(name)
if job.error() != None:
print 'job had an error, use view details'
elif job.state() == 'Finished':
self.showPEATSAResultsDialog(job, name)
else:
print 'Job is not finished yet.'
return
def editConfigFile(self):
"""Edit config file"""
from PEATDB.textFrame import textFrame
tf = textFrame(parent=self.mainwin,
title='PEATSA Conf file')
tf.load_from_file(self.confpath)
self.parent.wait_window(tf.frame)
#reconnect
configuration = Core.Environment.Configuration(filename=self.confpath)
self.connect(configuration)
return
def showPEATSAResultsDialog(self, job, name):
resdlg = Toplevel()
resdlg.geometry('600x450+300+200')
resdlg.title('PEATSA results '+name)
balloon = Pmw.Balloon(resdlg)
self.currname = name
body = Frame(resdlg)
resdlg.initial_focus = body
body.pack(fill=BOTH,expand=1,padx=5, pady=5)
self.matrices = job.data.allMatrices()
fr=Frame(body)
fr.grid(row=0,column=0,sticky='news',rowspan=2)
for m in self.matrices:
if self.matrices[m] != None:
self.showMatrix(fr,self.matrices[m], m)
self.labboklist = self.parent.labbookSheetsSelector(body)
self.labboklist.grid(row=0,column=1,sticky='news')
bf=Frame(body)
bf.grid(row=1,column=1,sticky='ew')
b=Button(bf,text='Merge into main table', command=lambda: self.mergeTable(main=True))
b.pack(fill=X,expand=1)
balloon.bind(b,'Merge results into main DB table')
b=Button(bf,text='Merge into Selected', command=self.mergeTable)
b.pack(fill=X,expand=1)
balloon.bind(b,'Merge results into an existing labbook table by matching the mutations')
b=Button(bf,text='Create new table', command=self.send2Labbook)
b.pack(fill=X,expand=1)
balloon.bind(b,'Send results to a new sheet in the main labbook')
Button(bf,text='Save as CSV', command=self.saveCSV).pack(fill=X,expand=1)
body.columnconfigure(0,weight=1)
body.rowconfigure(0,weight=1)
return
def showMatrix(self, frame, matrix, label=''):
"""Show matrix in table"""
M = self.matrix2Table(matrix)
mtable = self.showTable(frame, M, label)
return mtable
def showTable(self, frame, model, label=''):
"""Show model in table"""
tf=LabelFrame(frame,text=label)
tf.pack(fill=BOTH,expand=1)
mtable = TableCanvas(tf, model=model, cellwidth=70,
editable=False)
mtable.createTableFrame()
return mtable
def mergeTable(self, main=False):
"""Send a matrix to the peat main table or labbook sheet
by merging matching mutations.
Requires that one field in the table stores compatible
mutant format supported by PEATSA"""
if main == False:
try:
name = self.labboklist.getcurselection()[0]
except:
print 'no name selected'
return
if main == True:
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None: continue
M = self.parent.tablemodel
M = self.mergeMatrix(matrix, M)
self.parent.updateTable()
else:
M = self.DB.getLabbookSheet(name)
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None: continue
M = self.mergeMatrix(matrix, M)
if M != None:
self.DB.createLabbookSheet(name, M)
self.parent.startLabbook('ALL')
return
def send2Labbook(self):
"""Send matrix to selected labbook"""
#get name
cols = ['']+self.DB.getSimpleFields()
DB=self.DB
mpDlg = MultipleValDialog(title='Send to Labbook',
initialvalues=(self.currname, cols),
labels=('table name','exp data column'),
types=('string','list'),
parent=self.mainwin)
if mpDlg.result == False:
return
name = mpDlg.results[0]
expcol = mpDlg.results[1]
M = DBActions.sendDB2Labbook(DB,recs=None,cols=['Mutations',expcol],name=name)
for m in self.matrices:
matrix = self.matrices[m]
if matrix != None:
M = self.mergeMatrix(matrix, M)
self.DB.createLabbookSheet(name, M)
self.parent.startLabbook('ALL')
return
def saveCSV(self):
"""Save matrix to csv"""
filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',
initialdir=os.getcwd(),
filetypes=[("csv","*.csv"),("All files","*.*")])
if not filename:
return
for m in self.matrices:
matrix = self.matrices[m]
if matrix != None:
c=matrix.csvRepresentation()
f=open(filename,'w')
f.write(c)
f.close()
return
def matrix2Table(self, matrix):
"""Creates a table model from a peatsa matrix"""
M = TableModel()
M.addColumn('Mutations')
fields = matrix.columnHeaders()
for f in fields:
M.addColumn(f)
i = matrix.indexOfColumnWithHeader('Mutations')
for row in matrix:
mutationSet = Core.Data.MutationSet(row[i])
code = '+'.join(mutationSet.mutationCodes(reduced=True))
M.addRow(code)
for f in fields:
j = matrix.indexOfColumnWithHeader(f)
if f == 'Mutations':
M.data[code]['Mutations'] = code
else:
M.data[code][f] = str(row[j])
return M
def mergeMatrix(self, matrix, model, fields=None, newfields=None):
"""Merge a peatsa matrix with a table, returns merged tablemodel
tablemodel: input tablemodel
fields: which fields from matrix should be included in merge, default all
newfields: a dict that can map matrix names to new col names
"""
M = self.matrix2Table(matrix)
if fields==None:
fields = M.columnNames
key = 'Mutations'
if not key in model.columnNames:
print 'this table has no mutations column, we cannot merge'
return
i = matrix.indexOfColumnWithHeader(key)
for row in model.reclist:
try:
mset1 = Core.Data.MutationSet(model.data[row][key])
except:
continue
for rec in M.reclist:
try:
mset2 = Core.Data.MutationSet(M.data[rec][key])
except:
continue
if mset1 == mset2:
#add this data to table
for f in fields:
if newfields!=None and newfields.has_key(f):
col = newfields[f]
else:
col = f
if not M.data[rec].has_key(f): continue
model.addColumn(col)
try:
model.data[row][col] = float(M.data[rec][f])
except:
model.data[row][col] = M.data[rec][f]
return model
def showAllResults(self):
"""Show results for single or multiple jobs together"""
names = self.jobstable.get_selectedRecordNames()
if len(names)==1:
ax,mh,x,y=self.showResults()
else:
tx=[]; ty=[]
import pylab as plt
f=plt.figure(figsize=(8,8))
ax=f.add_subplot(111)
for n in names:
a,mh,x,y = self.showResults(n,showtable=False, ax=ax,stats=False)
tx.extend(x)
ty.extend(y)
ax.legend()
#add stats for summary
from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
C.addStats(ax,tx,ty)
f.show()
return
def showResults(self, name=None, showtable=True, ax=None, stats=True):
"""Show results with correlation plot from selected job"""
job, name = self.getJob(name)
if job == None:
print 'job not in DB'
return
if job.state() != 'Finished':
print 'job not finished'
return
self.matrices = job.data.allMatrices()
#print self.matrices['ModellingResults'].csvRepresentation()
jobmeta = job.metadata()
cols = self.DB.getSimpleFields()
expcol = None
expdata = None
#print jobmeta
if jobmeta.has_key('expcol'):
expcol = jobmeta['expcol']
if expcol not in cols and jobmeta.has_key('project'):
#we may have stored the exp data in another project
prjdata = jobmeta['project']
print 'trying to loading exp data from external project(s)'
from PEATDB.Base import PDatabase
from PEATTables import PEATTableModel
tmpdb = PDatabase(**prjdata)
print tmpdb
S = PEATTableModel(tmpdb)
expdata = S.simpleCopy(include=['Mutations'])
print expdata
#if exp column not known then ask user
if expcol == '' or expcol == None:
mpDlg = MultipleValDialog(title='Select Experimental Data',
initialvalues=[cols],
labels=['exp data column:'],
types=['list'],
parent=self.mainwin)
if mpDlg.result == True:
expcol = mpDlg.results[0]
else:
return
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None or not 'Total' in matrix.columnHeaders():
continue
ax,mh,x,y = self.plotMerged(matrix, expcol, expdata, m,
showtable, ax, name, stats)
#need to add this for mousehandler to work.. hack
'''from Correlation import MouseHandler
mh = MouseHandler(ax, labels=expcol, key='Mutations')
mh.connect()'''
return ax,mh,x,y
def plotMerged(self, matrix, expcol, expdata=None,
title='', showtable=True, ax=None, name=None,
stats=True):
"""Merge a set of exp vals with predictions and plot"""
if expdata==None:
expdata = self.parent.tablemodel.simpleCopy(include=['Mutations'])
merged = self.mergeMatrix(matrix, expdata)
x,y,names,muts = merged.getColumns(['Total',expcol,'name','Mutations'],allowempty=False)
from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
muts = ['mutation: '+i for i in muts]
labels = zip(names, muts)
ax,frame,mh = C.plotCorrelation(x,y,labels,title=title,ylabel=expcol,
ax=ax,plotname=name,stats=stats,err=4)
x=[round(float(i),2) for i in x]
y=[round(float(i),2) for i in y]
if showtable == True:
table = self.showTable(frame, merged)
mh.table = table
return ax,mh,x,y
def test(self):
job, name = self.getJob('myjob')
if job.error() != None or job.state() != 'Finished':
return
stabmatrix = job.data.stabilityResults
L = self.DB.getLabbookSheet('myjob')
L = self.mergeMatrix(stabmatrix, L, fields=['name'])
print L.columnNames
#L1 = self.DB.getLabbookSheet('myjob3')
#L.merge(L1)
return
def displayNoDBWarning(self):
"""Warn user that no DB is present"""
tkMessageBox.showwarning("Cannot launch plugin",
'No Database is currently open. '
'You should first open a project.')
return
def help(self):
import webbrowser
link='http://enzyme.ucd.ie/main/index.php/PEAT_SA'
webbrowser.open(link,autoraise=1)
return
def quit(self, evt=None):
"""We MUST stop the jobManager"""
self.log2Stdout()
self.jobManager.stopLogging()
self.mainwin.destroy()
print 'closing plugin'
return
def main():
import os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="file",
help="Open a local db")
opts, remainder = parser.parse_args()
#test
if opts.file != None and os.path.exists(opts.file):
path=os.path.abspath(opts.file)
from PEATDB.Base import PDatabase
DB = PDatabase(local=path)
P = PEATSAPlugin()
P.main(DB=DB)
P.test()
if __name__ == '__main__':
main() | random_line_split | |
PEATSAplugin.py | #!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
try:
from Plugins import Plugin
except:
from PEATDB.Plugins import Plugin
import os, types, copy, pickle
from Tkinter import *
import Pmw
import PEATSA.WebApp.Data
import PEATSA.WebApp.UtilityFunctions
import PEATSA.Core as Core
from PEATDB.Dialogs import MultipleValDialog
from PEATDB.Actions import DBActions
from PEATDB.TableModels import TableModel
from PEATDB.Tables import TableCanvas
import tkMessageBox, tkSimpleDialog, tkFileDialog
class PEATSAPlugin(Plugin):
"""Template GUI plugin for PEAT App"""
capabilities = ['gui']
requires = ['PEATSA']
menuentry = 'PEATSA Plugin'
gui_methods = {'fetchJob':'Fetch Job from Server',
'editConfigFile' : 'Configure Server',
'help':'Help',
'quit':'Close Window'}
buttonorder = ['createJobDialog','fetchJob','editConfigFile','help','quit']
about = 'This plugin allows you to call PEATSA'
calctypes = ['stability','binding','pka']
def main(self, parent=None, DB=None):
if parent == None:
if DB != None:
self.DB = DB
self.setupConnection()
else:
return
else:
self.parent = parent
self.DB = parent.DB
if self.DB == None:
self.displayNoDBWarning()
return
self._doFrame()
self.setupConnection()
print 'Updating jobs table..'
self.updateJobs()
return self
def setupConnection(self):
"""Set up connection"""
homepath = os.path.expanduser("~")
self.confpath = os.path.join(homepath, 'peatsa.conf')
if os.path.exists(self.confpath):
configuration = Core.Environment.Configuration(filename=self.confpath)
else:
configuration = Core.Environment.Configuration(searchDefaultLocations=False)
configuration.add_section('DATABASE')
configuration.set('DATABASE', 'database', 'DBSAInterface')
configuration.set('DATABASE', 'host', 'enzyme.ucd.ie')
configuration.set('DATABASE', 'user', 'peatdb')
configuration.set('DATABASE', 'password', '123')
configuration.writeToFile(self.confpath)
if self.parent != None:
tkMessageBox.showwarning("Connection Error",
'No PEATSA server configured, press configure server'
' to set a server, username and password.')
self.connect(configuration)
return
def _doFrame(self):
self.mainwin = self.parent.createChildFrame(width=460,title='PEATSA Plugin')
#self.mainwin = self.parent.create
methods = self._getmethods()
methods = [m for m in methods if m[0] in self.gui_methods.keys()]
l=Label(self.mainwin, text='PEATSA Interface')
l.pack(side=TOP,fill=BOTH)
self.tf=LabelFrame(self.mainwin,text='Project Calculations')
self.tf.pack(side=TOP,fill=BOTH,expand=1)
self.manageJobsButtons(self.mainwin)
self._createButtons(methods)
self.log = self.createLogWin(self.mainwin)
self.log.pack(side=TOP,fill=BOTH,expand=1)
self.stdout2Log()
self.mainwin.bind("<Destroy>", self.quit)
#self.parent.sidepane.bind("<Destroy>", self.test1)
return
def _createButtons(self, methods):
"""Dynamically create buttons for supplied methods, which is a tuple
of (method name, label)"""
mbutton=Menubutton(self.mainwin, text='Options', width=12,
borderwidth=2, relief=RIDGE,
activeforeground='red')
menu=Menu(mbutton,tearoff=0)
mbutton['menu']=menu
mbutton.pack(side=BOTTOM,fill=BOTH)
for m in methods:
menu.add_radiobutton(label=self.gui_methods[m[0]],
indicatoron=0,
command=m[1])
b=Button(self.mainwin,text='Create Calculation',command=self.createJobDialog)
b.pack(side=BOTTOM,fill=BOTH)
return
def updateJobsTable(self):
"""Show table for current jobs list"""
self.checkJobsDict()
jobdict = self.DB.meta.peatsa_jobs
M = TableModel()
#open job log from file
f=open('jobstates.log','r')
jl = pickle.load(f)
for j in jobdict:
jobid = jobdict[j]
try:
M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])
except:
M.addRecord(j,state='Not in DB')
self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)
self.jobstable.createTableFrame()
self.log.yview('moveto', 1)
f.close()
return
def manageJobsButtons(self, parent):
fr1 = Frame(parent)
Button(fr1,text='View Results',command=self.showAllResults,bg='#ccFFFF').pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
Button(fr1,text='Merge Results',command=self.mergeCurrent).pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
fr = Frame(parent)
c='#ADD8E6'
Button(fr,text='Show Details',command=self.viewDetails,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Manage Results',command=self.manageResults,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Remove',command=self.removeJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Resubmit',command=self.resubmitJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
fr.pack(fill=BOTH)
return
def createLogWin(self, parent):
log = Pmw.ScrolledText(parent,
borderframe=1,
labelpos = 'n',
label_text='Log',
usehullsize = 1,
hull_width = 800,
hull_height = 200,
text_wrap='word')
return log
def stdout2Log(self):
"""Redirect stdout to app control"""
sys.stdout = self
sys.stderr = self
return
def log2Stdout(self):
"""return to stdout"""
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
return
def write(self, txt):
"""Handle stdout if required"""
self.log.appendtext(txt)
self.log.update_idletasks()
return
def flush(self):
return
def connect(self, configuration):
"""Create connection"""
self.connection = PEATSA.WebApp.UtilityFunctions.ConnectionFromConfiguration(configuration)
self.jobManager = PEATSA.WebApp.Data.JobManager(self.connection)
self.jobManager.setJobStateLogging('jobstates.log',interval=60)
print '\nConnection to server made sucessfully.\n'
return
def createMutationList(self, filename=None):
self.mutationList = Core.Data.MutationListFile(create=False)
return
def fetchJob(self):
"""Get job from it's db ID and add to list"""
mpDlg = MultipleValDialog(title='Get Job',
initialvalues=('','my job1'),
labels=('ID','Your label',),
types=('string','string'),
parent=self.mainwin)
if mpDlg.result == True:
jobid = mpDlg.results[0]
name = mpDlg.results[1]
else:
return
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
if job != None:
print 'adding job id %s to list' %job.identification
self.storeJob(name, job)
self.updateJobs()
return
def writetempPDB(self,name=None,pdbfile='refprot.pdb'):
if name==None:
name = self.DB.meta.refprotein
pdblines = self.DB[name].Structure
#pdbfile = 'refprot.pdb'
fd=open(pdbfile,'w')
for line in pdblines:
fd.write(line)
fd.close()
return pdbfile
def getrefPDBName(self):
name = self.DB.meta.refprotein
if self.DB[name].has_key('pdbname'):
name = self.DB[name]['pdbname']
return name.split('.')[0]
else:
return ''
def createJobDialog(self):
"""Get details from user using dialog
required: structure, mutations, type of calc and a tag (optional)"""
def validatename(text):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return 1
if text in self.DB.meta.peatsa_jobs:
return -1
else:
return 1
def close():
jobdlg.destroy()
def loadmuts():
filename=tkFileDialog.askopenfilename(initialdir=os.getcwd(),
filetypes=[("All files","*")])
if filename:
mutlist.importfile(filename)
return
def loadmutsfromDB():
for p in self.DB.getRecs():
mut = self.DB.get(p).Mutations
if mut == None or mut=='':
continue
if type(mut) is types.StringType:
mutlist.appendtext(mut+'\n')
else:
mutstring = mut.getMutationString()
if mutstring != None:
mutlist.appendtext(mutstring+'\n')
return
def getstruct():
filename=tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("pdb","*.pdb"),("All files","*.*")])
pdbentry.setvalue(filename)
return
def getligand():
self.ligandfile = tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("mol2","*.mol2"),("All files","*.*")])
def submit():
#if calcmenu.getcurselection() == 'both':
# calcs = ['stability','binding']
if calcmenu.getcurselection() == 'pka':
calcs = ['scan']
else:
calcs = [calcmenu.getcurselection()]
mutationlist = mutlist.getvalue().split('\n')
mutationlist.remove('')
pdbfile=None; pdb = None
quality = mutqualentry.getvalue()
if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:
tkMessageBox.showinfo('No ref protein',
'Set a reference (wt) protein first')
return
#if self.useref.get() == 1:
#we use ref pdb by default now
pdbfile = self.writetempPDB()
pdbname = self.getrefPDBName()
if len(mutationlist) == 0 or mutationlist==[u'']:
print 'mutation list is empty'
return
if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:
print 'job name already used'
return
name=nameentry.getvalue()
expcol = expcolmenu.getcurselection()
self.submitJob(name=name, pdbname=pdbname,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=mutationlist,
calcs=calcs, mutationquality=quality,
meta={'expcol':expcol,'pdbname':pdbname})
close()
jobdlg = Toplevel()
jobdlg.geometry('+220+220')
jobdlg.title('Create Calculation')
balloon = Pmw.Balloon(jobdlg)
nameentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Name:',
validate = validatename,
value = 'mycalc')
nameentry.pack(fill=BOTH,expand=1)
balloon.bind(nameentry, 'Calculation name can be anything, but should be unique')
expcols = ['']+self.DB.getSimpleFields()
expcolmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Exp. col:',
items = expcols,
initialitem = '',
menubutton_width = 8)
expcolmenu.pack(fill=BOTH,expand=1)
balloon.bind(expcolmenu, 'Field with experimental data to compare, optional')
calcmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Calculation Type:',
items = self.calctypes,
initialitem = 'stability',
menubutton_width = 8)
calcmenu.pack(fill=X,expand=1)
fr=Frame(jobdlg)
fr.pack(fill=X,expand=1)
mutqualentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Quality:',
validate = validatename,
value = '2.0')
mutqualentry.pack(fill=BOTH,expand=1)
Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)
self.ligandfile=None
mutlist = Pmw.ScrolledText(jobdlg,
labelpos = 'n',
label_text='Mutations:',
usehullsize = 1,
hull_width = 200,
hull_height = 250,
text_wrap='word')
mutlist.pack(fill=BOTH,expand=1)
Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)
Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2)
jobdlg.grab_set()
jobdlg.transient(self.parent)
self.parent.wait_window(jobdlg)
return
def submitJob(self, name='mycalc', pdbname=None, pdb=None, pdbfile=None, ligandfile=None,
mutations=[], calcs=['stability'], mutationquality='2.0', meta={}):
"""Submit job to server"""
if 'scan' in calcs and pdbname==None:
print 'You must provide pdb code for pKa calcs'
return
if pdb==None and pdbfile==None:
return
job = self.jobManager.createJob(pdbId=pdbname, calculations=calcs,
dataTable='Data', metadata=meta,
optionArgs={'--mutationQuality':mutationquality})
if pdb != None:
job.setStructure(pdb)
else:
job.setStructureFromFile(pdbfile)
if 'binding' in calcs:
job.setLigandFromFile(ligandfile)
self.mutationList = Core.Data.MutationListFile(filename='tempmutlist', create=True)
sets=[]
for code in mutations:
if code == '': continue
try:
sets.append(Core.Data.MutationSet(code))
except:
print 'mutation code %s incorrect' %code
for s in sets:
self.mutationList.addMutant(s, autoUpdate=False, ignoreDuplicates=True)
self.mutationList.removeDuplicates(autoUpdate=False)
job.setMutationListFile(self.mutationList)
job.setState('Ready')
self.jobManager.logJobStates('jobstates.log')
#add job to peat database
self.storeJob(name, job)
if self.parent != None:
username = self.parent.username
self.updateJobs()
else:
username = None
self.DB.commit(note='peatsa job',user=username)
print 'job submitted successfully'
return
def resubmitJob(self):
"""Resend a job based on new mutations in DB that are not in job already"""
job, name = self.getJob()
if job == None:
return
DB=self.DB
self.matrices = job.data.allMatrices()
for m in matrices:
matrix=matrices[m]
if matrix==None: return
muts = matrix.mutationCodes()
dbmuts = [DB.get(p).Mutations for p in DB.getRecs()]
newmuts = list(set(dbmuts) - set(muts))
print 'the following mutations in the project are not in the job: %s' %newmuts
'''self.submitJob(name=name,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=newmuts,
calcs=calcs, meta={'expcol':expcol}) '''
self.log.yview('moveto', 1)
return
def getJob(self, name=None):
"""Get job from name"""
if name == None:
name = self.jobstable.get_selectedRecordNames()[0]
if name == None:
return None, name
jobid = self.DB.meta.peatsa_jobs[name]
try:
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
except:
#print 'job not in database'
return None,name
return job, name
def removeJob(self):
"""Remove a job from the db"""
job, name = self.getJob()
answer = tkMessageBox.askyesno("Warning",'Remove this job?')
if answer == False:
return
try:
self.jobManager.deleteJob(job)
except:
print 'job not in database, removing from peat'
del self.DB.meta.peatsa_jobs[name]
self.DB.meta.__p__changed = 1
self.updateJobs()
return
def viewDetails(self, name=None):
job, name = self.getJob()
if job==None:
return
jobmeta = job.metadata()
print
print job.data
print 'details for job %s' %name
print 'job status:',job.state()
print 'submitted on ',job.date
if jobmeta.has_key('pdbname'):
print 'original pdb file:', jobmeta['pdbname']
print 'mutations:', len(job.mutationListFile().mutantList())
print '(this job has id %s)' %job.identification
if job.error() != None:
print 'The job had an error..'
print job.error()['ErrorDescription']
print job.error()['DetailedDescription']
print
self.log.yview('moveto', 1)
return
def addColoredText(self, st, tag, word, fg='black', bg='white'):
|
def checkJobsDict(self):
"""Check jobs data structure exists"""
if not hasattr(self.DB.meta,'peatsa_jobs'):
from ZODB.PersistentMapping import PersistentMapping
self.DB.meta.peatsa_jobs = PersistentMapping()
def storeJob(self, name, job):
"""Store job to DB"""
self.checkJobsDict()
self.DB.meta.peatsa_jobs[name] = job.identification
return
def updateJobs(self):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return
self.updateJobsTable()
self.wait=self.mainwin.after(60000, self.updateJobs)
return
def mergeResults(self, job, colname, tablemodel):
"""Merge given job results to tablemodel"""
if job==None:
return
matrices = job.data.allMatrices()
if not colname:
return
nf={'Total':colname}
for m in matrices:
matrix = matrices[m]
if matrix == None: continue
M = self.mergeMatrix(matrix, tablemodel, fields=['Total'], newfields=nf)
return
def mergeCurrent(self):
"""Auto merge selected job results to main table
called from GUI """
job, name = self.getJob()
if job==None:
return
#get field name to use
colname = tkSimpleDialog.askstring("Column name?",
"Name for column:",
initialvalue=name+'_Predictions',
parent=self.mainwin)
M = self.parent.tablemodel
self.mergeResults(job, colname, M)
self.parent.updateTable()
#also send some meta data to peatsa_meta?
'''from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
cc,rmse = C.getStats(pre,exp)
data.append({'name':p,'rmse':rmse,'cc':cc}) '''
return
def manageResults(self, name=None):
"""Get the results back - we can send the matrix to the main peat
table or put results into a labbook sheet.
Also allow user to merge with an existing table"""
job, name = self.getJob(name)
if job.error() != None:
print 'job had an error, use view details'
elif job.state() == 'Finished':
self.showPEATSAResultsDialog(job, name)
else:
print 'Job is not finished yet.'
return
def editConfigFile(self):
"""Edit config file"""
from PEATDB.textFrame import textFrame
tf = textFrame(parent=self.mainwin,
title='PEATSA Conf file')
tf.load_from_file(self.confpath)
self.parent.wait_window(tf.frame)
#reconnect
configuration = Core.Environment.Configuration(filename=self.confpath)
self.connect(configuration)
return
def showPEATSAResultsDialog(self, job, name):
resdlg = Toplevel()
resdlg.geometry('600x450+300+200')
resdlg.title('PEATSA results '+name)
balloon = Pmw.Balloon(resdlg)
self.currname = name
body = Frame(resdlg)
resdlg.initial_focus = body
body.pack(fill=BOTH,expand=1,padx=5, pady=5)
self.matrices = job.data.allMatrices()
fr=Frame(body)
fr.grid(row=0,column=0,sticky='news',rowspan=2)
for m in self.matrices:
if self.matrices[m] != None:
self.showMatrix(fr,self.matrices[m], m)
self.labboklist = self.parent.labbookSheetsSelector(body)
self.labboklist.grid(row=0,column=1,sticky='news')
bf=Frame(body)
bf.grid(row=1,column=1,sticky='ew')
b=Button(bf,text='Merge into main table', command=lambda: self.mergeTable(main=True))
b.pack(fill=X,expand=1)
balloon.bind(b,'Merge results into main DB table')
b=Button(bf,text='Merge into Selected', command=self.mergeTable)
b.pack(fill=X,expand=1)
balloon.bind(b,'Merge results into an existing labbook table by matching the mutations')
b=Button(bf,text='Create new table', command=self.send2Labbook)
b.pack(fill=X,expand=1)
balloon.bind(b,'Send results to a new sheet in the main labbook')
Button(bf,text='Save as CSV', command=self.saveCSV).pack(fill=X,expand=1)
body.columnconfigure(0,weight=1)
body.rowconfigure(0,weight=1)
return
def showMatrix(self, frame, matrix, label=''):
"""Show matrix in table"""
M = self.matrix2Table(matrix)
mtable = self.showTable(frame, M, label)
return mtable
def showTable(self, frame, model, label=''):
"""Show model in table"""
tf=LabelFrame(frame,text=label)
tf.pack(fill=BOTH,expand=1)
mtable = TableCanvas(tf, model=model, cellwidth=70,
editable=False)
mtable.createTableFrame()
return mtable
def mergeTable(self, main=False):
"""Send a matrix to the peat main table or labbook sheet
by merging matching mutations.
Requires that one field in the table stores compatible
mutant format supported by PEATSA"""
if main == False:
try:
name = self.labboklist.getcurselection()[0]
except:
print 'no name selected'
return
if main == True:
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None: continue
M = self.parent.tablemodel
M = self.mergeMatrix(matrix, M)
self.parent.updateTable()
else:
M = self.DB.getLabbookSheet(name)
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None: continue
M = self.mergeMatrix(matrix, M)
if M != None:
self.DB.createLabbookSheet(name, M)
self.parent.startLabbook('ALL')
return
def send2Labbook(self):
"""Send matrix to selected labbook"""
#get name
cols = ['']+self.DB.getSimpleFields()
DB=self.DB
mpDlg = MultipleValDialog(title='Send to Labbook',
initialvalues=(self.currname, cols),
labels=('table name','exp data column'),
types=('string','list'),
parent=self.mainwin)
if mpDlg.result == False:
return
name = mpDlg.results[0]
expcol = mpDlg.results[1]
M = DBActions.sendDB2Labbook(DB,recs=None,cols=['Mutations',expcol],name=name)
for m in self.matrices:
matrix = self.matrices[m]
if matrix != None:
M = self.mergeMatrix(matrix, M)
self.DB.createLabbookSheet(name, M)
self.parent.startLabbook('ALL')
return
def saveCSV(self):
"""Save matrix to csv"""
filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',
initialdir=os.getcwd(),
filetypes=[("csv","*.csv"),("All files","*.*")])
if not filename:
return
for m in self.matrices:
matrix = self.matrices[m]
if matrix != None:
c=matrix.csvRepresentation()
f=open(filename,'w')
f.write(c)
f.close()
return
def matrix2Table(self, matrix):
"""Creates a table model from a peatsa matrix"""
M = TableModel()
M.addColumn('Mutations')
fields = matrix.columnHeaders()
for f in fields:
M.addColumn(f)
i = matrix.indexOfColumnWithHeader('Mutations')
for row in matrix:
mutationSet = Core.Data.MutationSet(row[i])
code = '+'.join(mutationSet.mutationCodes(reduced=True))
M.addRow(code)
for f in fields:
j = matrix.indexOfColumnWithHeader(f)
if f == 'Mutations':
M.data[code]['Mutations'] = code
else:
M.data[code][f] = str(row[j])
return M
def mergeMatrix(self, matrix, model, fields=None, newfields=None):
"""Merge a peatsa matrix with a table, returns merged tablemodel
tablemodel: input tablemodel
fields: which fields from matrix should be included in merge, default all
newfields: a dict that can map matrix names to new col names
"""
M = self.matrix2Table(matrix)
if fields==None:
fields = M.columnNames
key = 'Mutations'
if not key in model.columnNames:
print 'this table has no mutations column, we cannot merge'
return
i = matrix.indexOfColumnWithHeader(key)
for row in model.reclist:
try:
mset1 = Core.Data.MutationSet(model.data[row][key])
except:
continue
for rec in M.reclist:
try:
mset2 = Core.Data.MutationSet(M.data[rec][key])
except:
continue
if mset1 == mset2:
#add this data to table
for f in fields:
if newfields!=None and newfields.has_key(f):
col = newfields[f]
else:
col = f
if not M.data[rec].has_key(f): continue
model.addColumn(col)
try:
model.data[row][col] = float(M.data[rec][f])
except:
model.data[row][col] = M.data[rec][f]
return model
def showAllResults(self):
"""Show results for single or multiple jobs together"""
names = self.jobstable.get_selectedRecordNames()
if len(names)==1:
ax,mh,x,y=self.showResults()
else:
tx=[]; ty=[]
import pylab as plt
f=plt.figure(figsize=(8,8))
ax=f.add_subplot(111)
for n in names:
a,mh,x,y = self.showResults(n,showtable=False, ax=ax,stats=False)
tx.extend(x)
ty.extend(y)
ax.legend()
#add stats for summary
from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
C.addStats(ax,tx,ty)
f.show()
return
def showResults(self, name=None, showtable=True, ax=None, stats=True):
"""Show results with correlation plot from selected job"""
job, name = self.getJob(name)
if job == None:
print 'job not in DB'
return
if job.state() != 'Finished':
print 'job not finished'
return
self.matrices = job.data.allMatrices()
#print self.matrices['ModellingResults'].csvRepresentation()
jobmeta = job.metadata()
cols = self.DB.getSimpleFields()
expcol = None
expdata = None
#print jobmeta
if jobmeta.has_key('expcol'):
expcol = jobmeta['expcol']
if expcol not in cols and jobmeta.has_key('project'):
#we may have stored the exp data in another project
prjdata = jobmeta['project']
print 'trying to loading exp data from external project(s)'
from PEATDB.Base import PDatabase
from PEATTables import PEATTableModel
tmpdb = PDatabase(**prjdata)
print tmpdb
S = PEATTableModel(tmpdb)
expdata = S.simpleCopy(include=['Mutations'])
print expdata
#if exp column not known then ask user
if expcol == '' or expcol == None:
mpDlg = MultipleValDialog(title='Select Experimental Data',
initialvalues=[cols],
labels=['exp data column:'],
types=['list'],
parent=self.mainwin)
if mpDlg.result == True:
expcol = mpDlg.results[0]
else:
return
for m in self.matrices:
matrix = self.matrices[m]
if matrix == None or not 'Total' in matrix.columnHeaders():
continue
ax,mh,x,y = self.plotMerged(matrix, expcol, expdata, m,
showtable, ax, name, stats)
#need to add this for mousehandler to work.. hack
'''from Correlation import MouseHandler
mh = MouseHandler(ax, labels=expcol, key='Mutations')
mh.connect()'''
return ax,mh,x,y
def plotMerged(self, matrix, expcol, expdata=None,
title='', showtable=True, ax=None, name=None,
stats=True):
"""Merge a set of exp vals with predictions and plot"""
if expdata==None:
expdata = self.parent.tablemodel.simpleCopy(include=['Mutations'])
merged = self.mergeMatrix(matrix, expdata)
x,y,names,muts = merged.getColumns(['Total',expcol,'name','Mutations'],allowempty=False)
from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
muts = ['mutation: '+i for i in muts]
labels = zip(names, muts)
ax,frame,mh = C.plotCorrelation(x,y,labels,title=title,ylabel=expcol,
ax=ax,plotname=name,stats=stats,err=4)
x=[round(float(i),2) for i in x]
y=[round(float(i),2) for i in y]
if showtable == True:
table = self.showTable(frame, merged)
mh.table = table
return ax,mh,x,y
def test(self):
job, name = self.getJob('myjob')
if job.error() != None or job.state() != 'Finished':
return
stabmatrix = job.data.stabilityResults
L = self.DB.getLabbookSheet('myjob')
L = self.mergeMatrix(stabmatrix, L, fields=['name'])
print L.columnNames
#L1 = self.DB.getLabbookSheet('myjob3')
#L.merge(L1)
return
def displayNoDBWarning(self):
"""Warn user that no DB is present"""
tkMessageBox.showwarning("Cannot launch plugin",
'No Database is currently open. '
'You should first open a project.')
return
def help(self):
import webbrowser
link='http://enzyme.ucd.ie/main/index.php/PEAT_SA'
webbrowser.open(link,autoraise=1)
return
def quit(self, evt=None):
"""We MUST stop the jobManager"""
self.log2Stdout()
self.jobManager.stopLogging()
self.mainwin.destroy()
print 'closing plugin'
return
def main():
import os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="file",
help="Open a local db")
opts, remainder = parser.parse_args()
#test
if opts.file != None and os.path.exists(opts.file):
path=os.path.abspath(opts.file)
from PEATDB.Base import PDatabase
DB = PDatabase(local=path)
P = PEATSAPlugin()
P.main(DB=DB)
P.test()
if __name__ == '__main__':
main()
| """add a space to the end of the word"""
word = word + " "
st.insert('end', word)
end_index = st.index('end')
begin_index = "%s-%sc" % (end_index, len(word) + 1)
st.tag_add(tag, begin_index, end_index)
st.tag_config(tag, foreground=fg, background=bg)
return | identifier_body |
Weather.py |
import math
import pickle
import datetime, time
import numpy as np
class Weather:
def __init__(self, wfile, lines=0):
""" Weather Constructor
Args:
wfile (str): input weather file
lines (int, optional): number of lines of the input file to read
"""
self.wfile = wfile
self.lines = lines
# load data from file
isLoaded = self._load()
if (isLoaded == -1):
print 'Error reading weather data'
return -1
# add data descriptions
self._setTargetNames()
self._setFeatureNames()
self._setStationData()
# set member data defaults
self.obsStart = [2017,10,23]
return
## GET METHODS
def getNrEntries(self):
|
def getTargetNames(self):
""" Get target names
Returns:
Target names
"""
return self.targetNames
def getNrTargets(self):
""" Get number of targets
Returns:
Number of targets
"""
return self.targetNames.size
def getFeatures(self):
""" Get feature names
Returns:
Feature names
"""
return self.featureNames
def getNrFeatures(self):
""" Get number of features
Returns:
Number of features
"""
return self.featureNames.size
def getFeatureData(self, feature):
""" Get data for chosen feature
Args:
feature (str): selected feature
Returns:
Observation data of the selected feature (list)
"""
return self.data[:,self._getFIdx(feature)]
def getStationData(self, stationId):
""" Get data for chosen station
Args:
stationId (str): selected station
Returns:
Observation data of the selected station (list)
"""
if (stationId == 'all'):
return self.stationData
else:
station = np.where(self.stationData == stationId)[0][0]
return self.stationData[station]
def getNrStations(self):
""" Get number of observation stations
Returns:
Number of observation stations
"""
return len(self.stationData)
## DATA MANIPULATION METHODS
def modify(self, feature, newValues):
""" Replace the data of a chosen feature with a given list of new values
Args:
feature (str): selected feature
newValues (list(str)): New set of values to overwrite old data
Returns:
0 for success
"""
self.data[:,self._getFIdx(feature)] = newValues
return 0
def append(self, featureName, featureData):
""" Append the data with a new feature and list of new values
Args:
featureName (str): name of new feature to add
featureData (list(str)): New set of values to add
Returns:
0 for success
"""
self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)
self.featureNames = np.append(self.featureNames, featureName)
return 0
def select(self, features):
""" Select a set of features to retain (and remove other features)
Args:
features (list(str)): Selected set of features
Returns:
0 for success
"""
if 'Weather Type' not in features:
features.append('Weather Type')
self.data = self.data[:,[self._getFIdx(f) for f in features]]
self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]
return 0
def discard(self):
""" Discard observations with null data
Returns:
0 for success
"""
for f in self.featureNames:
self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']
return
def delete(self, feature):
""" Delete a feature and assoicated data
Args:
feature (str): name of feature to delete
Returns:
0 for success
"""
if (self._isFIdx(feature)):
self.data = np.delete(self.data, self._getFIdx(feature), axis=1)
self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))
return 0
def export(self, fname):
""" Export object to pickle file
Args:
fname (str): export file name
Returns:
0 for success
"""
# discard any data with null feature values
self.discard()
# set target as last column
self.target = self.getFeatureData('Weather Type')
# remove non-exportable features
for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:
if self._isFIdx(n):
self.delete(n)
# convert all data to float
self.data = self.data.astype(float)
# export to file
pickle.dump(self, open(fname, 'wb'))
return 0
## STATS UTILITIES
def getObservations(self, stationId='', obsDate='', obsTime='', features=[]):
""" Provide observation data for a chosen feature filtered by station, date, time
Args:
stationId (str): Station ID
obsDate (str): Observation date
obsTime (str): Observation time
features (list(str)): List of chosen features
Returns:
stats (list): List of observation data
"""
# filter data
stats = self.data
if (stationId):
stats = stats[stats[:,self._getFIdx('Station ID')] == stationId]
if (obsDate):
stats = stats[stats[:,self._getFIdx('Date')] == obsDate]
if (obsTime):
stats = stats[stats[:,self._getFIdx('Time since midnight')] == obsTime]
# return features
if (features):
features = [self._getFIdx(f) for f in features]
return stats[:,features]
else:
return stats
def findStations(self, coords=[], offset=[], minThreshold=10, maxThreshold=100):
""" Find the nearet observation station to a given location
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
minThreshold (int): Minimum acceptable distance from chosen location
maxThreshold (int): Maximum acceptable distance from chosen location
Returns:
stations (list): List of nearby stations
"""
nearStations = []
# check for supplied Latitude and Longitude
if not (coords[0] and coords[1]):
return 0
# calculate new coords with offset
if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to reduce array
Returns:
0 if success
"""
self.stationData = self.data[:,[self._getFIdx(f) for f in \
'Station ID', 'Station Name', 'Latitude', 'Longitude']]
# self.stationData = np.unique(self.stationData, axis=0)
self.stationData = self._unique_rows(self.stationData)
return 0
## PRIVATE DATA MANIPULATION METHODS
def _load(self):
""" Load data from file
Returns:
0 if success, -1 if file cannot be read
"""
# number of non-data header details at top of data file
header = 1
# open file
weatherData = []
with open(self.wfile) as myfile:
if (self.lines > 0):
weatherData = [next(myfile) for x in xrange(self.lines + header)]
else:
weatherData = myfile.readlines()
# get data stream from first line
streamHeader = weatherData.pop(0).rstrip()
if (streamHeader == 'FULL'):
self.dataStream = 0
elif (streamHeader == 'ADVANCED'):
self.dataStream = 1
elif (streamHeader == 'BASIC'):
self.dataStream = 2
else:
print "Error: unecognised data stream from file %s" % (self.wfile)
return -1
# read data
inputData = []
for line in weatherData:
entries = line.split()
inputData.append(entries)
# copy all into np array
self.data = np.array(inputData)
return 0
def _getFIdx(self, featureName):
""" Get Feature Index in data numpy array
Args:
featureName (str): Name of feature
Returns:
index
"""
return np.where(self.featureNames == featureName)[0][0]
def _isFIdx(self, featureName):
""" Look up if feature name is indexed in data numpy array
Args:
featureName (str): Name of feature
Returns:
1 if success, 0 if not found
"""
return 1 if (featureName in self.featureNames) else 0
## PRIVATE STATS UTILITIES
def _getDistance(self, source, dest):
""" Get the distance as crow flies between two coordinates
Args:
source (float): Longitude and Latitude of source point
source (float): Longitude and Latitude of destination point
Returns:
distance (float): distance betwen points
"""
lat1 = source[0]
lat2 = dest[0]
lon1 = source[1]
lon2 = dest[1]
# Formula from https://www.movable-type.co.uk/scripts/latlong.html
R = 6370000
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
deltaPhi = math.radians(lat2-lat1)
deltalmb = math.radians(lon2-lon1)
a = math.sin(deltaPhi/2) * math.sin(deltaPhi/2) + \
math.cos(phi1) * math.cos(phi2) * \
math.sin(deltalmb/2) * math.sin(deltalmb/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));
d = (R * c)/1000.
return d
def _getNewCoords(self, coords, offset):
""" Calculate new coordinates after applying offset
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
BUG?:
direction seems to be opposite from what I expect, made correction of 360-x
LIMITATION:
Due E (or W) gives slightly different results for latitude (e.g. 50N over 200km is 49.96N)
Returns:
coords (list(float, float)): New coordinates
"""
oldlat = math.radians(float(coords[0]))
oldlon = math.radians(float(coords[1]))
magnitude = float(offset[0]) / 6370.
direction = math.radians(360.-float(offset[1]))
# Calculate lat/lon given radial and distnace (http://www.edwilliams.org/avform.htm#LL)
lat = math.asin(math.sin(oldlat) * math.cos(magnitude) + math.cos(oldlat) \
* math.sin(magnitude) * math.cos(direction))
lon = (oldlon - math.asin(math.sin(direction) * math.sin(magnitude) / math.cos(lat)) \
+ math.pi) % (2 * math.pi) - math.pi
# print coords, offset, oldlat, oldlon, magnitude, direction, math.degrees(lat), math.degrees(lon)
return (math.degrees(lat), math.degrees(lon))
# Workaround on earlier numpy versions from https://github.com/numpy/numpy/issues/2871
def _unique_rows(self, A, return_index=False, return_inverse=False):
"""
Similar to MATLAB's unique(A, 'rows'), this returns B, I, J
where B is the unique rows of A and I and J satisfy
A = B[J,:] and B = A[I,:]
Returns I if return_index is True
Returns J if return_inverse is True
"""
A = np.require(A, requirements='C')
assert A.ndim == 2, "array must be 2-dim'l"
B = np.unique(A.view([('', A.dtype)]*A.shape[1]),
return_index=return_index,
return_inverse=return_inverse)
if return_index or return_inverse:
return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \
+ B[1:]
else:
return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')
def _getRelTime(self, obsData):
""" Calculate the time relative to set sample start time for a given data point
Args:
obsData (list): Observation data for single time point
Returns:
relTime (str): Time relative to set sample start time (hours)
"""
# get unix time for start of data sample (midnight) as ref point
dt = datetime.datetime(self.obsStart[0], self.obsStart[1], self.obsStart[2], 0, 0)
startOfDay = int(time.mktime(dt.timetuple()))
# strip date string
dateString = [x.strip() for x in obsData[self._getFIdx('Date')].split('-')]
# get unix time for start of observation date
obsStartOfDay = int(time.mktime(datetime.datetime( \
int(dateString[0]), int(dateString[1]), int(dateString[2]), 0, 0).timetuple()))
# calculate relative time (hours)
relTime = int((obsStartOfDay + (int(obsData[self._getFIdx('Time since midnight')])*60) \
- startOfDay)/3600.)
return str(relTime)
| """ Get number of weather observations read from file
Returns:
Number of weather observations
"""
return len(self.data) | identifier_body |
Weather.py |
import math
import pickle
import datetime, time
import numpy as np
class Weather:
def __init__(self, wfile, lines=0):
""" Weather Constructor
Args:
wfile (str): input weather file
lines (int, optional): number of lines of the input file to read
"""
self.wfile = wfile
self.lines = lines
# load data from file
isLoaded = self._load()
if (isLoaded == -1):
print 'Error reading weather data'
return -1
# add data descriptions
self._setTargetNames()
self._setFeatureNames()
self._setStationData()
# set member data defaults
self.obsStart = [2017,10,23]
return
## GET METHODS
def getNrEntries(self):
""" Get number of weather observations read from file
Returns:
Number of weather observations
"""
return len(self.data)
def getTargetNames(self):
""" Get target names
Returns:
Target names
"""
return self.targetNames
def getNrTargets(self):
""" Get number of targets
Returns:
Number of targets
"""
return self.targetNames.size
def getFeatures(self):
""" Get feature names
Returns:
Feature names
"""
return self.featureNames
def getNrFeatures(self):
""" Get number of features
Returns:
Number of features
"""
return self.featureNames.size
def getFeatureData(self, feature):
""" Get data for chosen feature
Args:
feature (str): selected feature
Returns:
Observation data of the selected feature (list)
"""
return self.data[:,self._getFIdx(feature)]
def getStationData(self, stationId):
""" Get data for chosen station
Args:
stationId (str): selected station
Returns:
Observation data of the selected station (list)
"""
if (stationId == 'all'):
return self.stationData
else:
station = np.where(self.stationData == stationId)[0][0]
return self.stationData[station]
def getNrStations(self):
""" Get number of observation stations
Returns:
Number of observation stations
"""
return len(self.stationData)
## DATA MANIPULATION METHODS
def modify(self, feature, newValues):
""" Replace the data of a chosen feature with a given list of new values
Args:
feature (str): selected feature
newValues (list(str)): New set of values to overwrite old data
Returns:
0 for success
"""
self.data[:,self._getFIdx(feature)] = newValues
return 0
def append(self, featureName, featureData):
""" Append the data with a new feature and list of new values
Args:
featureName (str): name of new feature to add
featureData (list(str)): New set of values to add
Returns:
0 for success
"""
self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)
self.featureNames = np.append(self.featureNames, featureName)
return 0
def select(self, features):
""" Select a set of features to retain (and remove other features)
Args:
features (list(str)): Selected set of features
Returns:
0 for success
"""
if 'Weather Type' not in features:
features.append('Weather Type')
self.data = self.data[:,[self._getFIdx(f) for f in features]]
self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]
return 0
def discard(self):
""" Discard observations with null data
Returns:
0 for success
"""
for f in self.featureNames:
self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']
return
def delete(self, feature):
""" Delete a feature and assoicated data
Args:
feature (str): name of feature to delete
Returns:
0 for success
"""
if (self._isFIdx(feature)):
self.data = np.delete(self.data, self._getFIdx(feature), axis=1)
self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))
return 0
def export(self, fname):
""" Export object to pickle file
Args:
fname (str): export file name
Returns:
0 for success
"""
# discard any data with null feature values
self.discard()
# set target as last column
self.target = self.getFeatureData('Weather Type')
# remove non-exportable features
for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:
if self._isFIdx(n):
self.delete(n)
# convert all data to float
self.data = self.data.astype(float)
# export to file
pickle.dump(self, open(fname, 'wb'))
return 0
## STATS UTILITIES
def getObservations(self, stationId='', obsDate='', obsTime='', features=[]):
""" Provide observation data for a chosen feature filtered by station, date, time
Args:
stationId (str): Station ID
obsDate (str): Observation date
obsTime (str): Observation time
features (list(str)): List of chosen features
Returns:
stats (list): List of observation data
"""
# filter data
stats = self.data
if (stationId):
stats = stats[stats[:,self._getFIdx('Station ID')] == stationId]
if (obsDate):
stats = stats[stats[:,self._getFIdx('Date')] == obsDate]
if (obsTime):
stats = stats[stats[:,self._getFIdx('Time since midnight')] == obsTime]
# return features
if (features):
features = [self._getFIdx(f) for f in features]
return stats[:,features]
else:
return stats
def findStations(self, coords=[], offset=[], minThreshold=10, maxThreshold=100):
""" Find the nearet observation station to a given location
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
minThreshold (int): Minimum acceptable distance from chosen location
maxThreshold (int): Maximum acceptable distance from chosen location
Returns:
stations (list): List of nearby stations
"""
nearStations = []
# check for supplied Latitude and Longitude
if not (coords[0] and coords[1]):
return 0
# calculate new coords with offset
if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to reduce array
Returns:
0 if success
"""
self.stationData = self.data[:,[self._getFIdx(f) for f in \
'Station ID', 'Station Name', 'Latitude', 'Longitude']]
# self.stationData = np.unique(self.stationData, axis=0)
self.stationData = self._unique_rows(self.stationData)
return 0
## PRIVATE DATA MANIPULATION METHODS
def _load(self):
""" Load data from file
Returns:
0 if success, -1 if file cannot be read
"""
# number of non-data header details at top of data file
header = 1
# open file
weatherData = []
with open(self.wfile) as myfile:
if (self.lines > 0):
weatherData = [next(myfile) for x in xrange(self.lines + header)]
else:
weatherData = myfile.readlines()
# get data stream from first line
streamHeader = weatherData.pop(0).rstrip()
if (streamHeader == 'FULL'):
self.dataStream = 0
elif (streamHeader == 'ADVANCED'):
self.dataStream = 1
elif (streamHeader == 'BASIC'):
self.dataStream = 2
else:
print "Error: unecognised data stream from file %s" % (self.wfile)
return -1
# read data
inputData = []
for line in weatherData:
entries = line.split()
inputData.append(entries)
# copy all into np array
self.data = np.array(inputData)
return 0
def _getFIdx(self, featureName):
""" Get Feature Index in data numpy array
Args:
featureName (str): Name of feature
Returns:
index
"""
return np.where(self.featureNames == featureName)[0][0]
def _isFIdx(self, featureName):
""" Look up if feature name is indexed in data numpy array
Args:
featureName (str): Name of feature
Returns:
1 if success, 0 if not found
"""
return 1 if (featureName in self.featureNames) else 0
## PRIVATE STATS UTILITIES
def _getDistance(self, source, dest):
""" Get the distance as crow flies between two coordinates
Args:
source (float): Longitude and Latitude of source point
source (float): Longitude and Latitude of destination point
Returns:
distance (float): distance betwen points
"""
lat1 = source[0]
lat2 = dest[0]
lon1 = source[1]
lon2 = dest[1]
# Formula from https://www.movable-type.co.uk/scripts/latlong.html
R = 6370000
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
deltaPhi = math.radians(lat2-lat1)
deltalmb = math.radians(lon2-lon1)
a = math.sin(deltaPhi/2) * math.sin(deltaPhi/2) + \
math.cos(phi1) * math.cos(phi2) * \
math.sin(deltalmb/2) * math.sin(deltalmb/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));
d = (R * c)/1000.
return d
def _getNewCoords(self, coords, offset):
""" Calculate new coordinates after applying offset
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
BUG?:
direction seems to be opposite from what I expect, made correction of 360-x
LIMITATION:
Due E (or W) gives slightly different results for latitude (e.g. 50N over 200km is 49.96N)
Returns:
coords (list(float, float)): New coordinates
"""
oldlat = math.radians(float(coords[0]))
oldlon = math.radians(float(coords[1]))
magnitude = float(offset[0]) / 6370.
direction = math.radians(360.-float(offset[1]))
# Calculate lat/lon given radial and distnace (http://www.edwilliams.org/avform.htm#LL)
lat = math.asin(math.sin(oldlat) * math.cos(magnitude) + math.cos(oldlat) \
* math.sin(magnitude) * math.cos(direction))
lon = (oldlon - math.asin(math.sin(direction) * math.sin(magnitude) / math.cos(lat)) \
+ math.pi) % (2 * math.pi) - math.pi
# print coords, offset, oldlat, oldlon, magnitude, direction, math.degrees(lat), math.degrees(lon)
return (math.degrees(lat), math.degrees(lon))
# Workaround on earlier numpy versions from https://github.com/numpy/numpy/issues/2871
def _unique_rows(self, A, return_index=False, return_inverse=False):
"""
Similar to MATLAB's unique(A, 'rows'), this returns B, I, J
where B is the unique rows of A and I and J satisfy
A = B[J,:] and B = A[I,:]
Returns I if return_index is True
Returns J if return_inverse is True
"""
A = np.require(A, requirements='C')
assert A.ndim == 2, "array must be 2-dim'l"
B = np.unique(A.view([('', A.dtype)]*A.shape[1]),
return_index=return_index,
return_inverse=return_inverse)
if return_index or return_inverse:
return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \
+ B[1:]
else:
return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')
def | (self, obsData):
""" Calculate the time relative to set sample start time for a given data point
Args:
obsData (list): Observation data for single time point
Returns:
relTime (str): Time relative to set sample start time (hours)
"""
# get unix time for start of data sample (midnight) as ref point
dt = datetime.datetime(self.obsStart[0], self.obsStart[1], self.obsStart[2], 0, 0)
startOfDay = int(time.mktime(dt.timetuple()))
# strip date string
dateString = [x.strip() for x in obsData[self._getFIdx('Date')].split('-')]
# get unix time for start of observation date
obsStartOfDay = int(time.mktime(datetime.datetime( \
int(dateString[0]), int(dateString[1]), int(dateString[2]), 0, 0).timetuple()))
# calculate relative time (hours)
relTime = int((obsStartOfDay + (int(obsData[self._getFIdx('Time since midnight')])*60) \
- startOfDay)/3600.)
return str(relTime)
| _getRelTime | identifier_name |
Weather.py |
import math
import pickle
import datetime, time
import numpy as np
class Weather:
def __init__(self, wfile, lines=0):
""" Weather Constructor
Args:
wfile (str): input weather file
lines (int, optional): number of lines of the input file to read
"""
self.wfile = wfile
self.lines = lines
# load data from file
isLoaded = self._load()
if (isLoaded == -1):
print 'Error reading weather data'
return -1
# add data descriptions
self._setTargetNames()
self._setFeatureNames()
self._setStationData()
# set member data defaults
self.obsStart = [2017,10,23]
return
## GET METHODS
def getNrEntries(self):
""" Get number of weather observations read from file
Returns:
Number of weather observations
"""
return len(self.data)
def getTargetNames(self):
""" Get target names
Returns:
Target names
"""
return self.targetNames
def getNrTargets(self):
""" Get number of targets
Returns:
Number of targets
"""
return self.targetNames.size
def getFeatures(self):
""" Get feature names
Returns:
Feature names
"""
return self.featureNames
def getNrFeatures(self):
""" Get number of features
Returns:
Number of features
"""
return self.featureNames.size
def getFeatureData(self, feature):
""" Get data for chosen feature
Args:
feature (str): selected feature
Returns:
Observation data of the selected feature (list)
"""
return self.data[:,self._getFIdx(feature)]
def getStationData(self, stationId):
""" Get data for chosen station
Args:
stationId (str): selected station
Returns:
Observation data of the selected station (list)
"""
if (stationId == 'all'):
return self.stationData
else:
station = np.where(self.stationData == stationId)[0][0]
return self.stationData[station]
def getNrStations(self):
""" Get number of observation stations
Returns:
Number of observation stations
"""
return len(self.stationData)
## DATA MANIPULATION METHODS
def modify(self, feature, newValues):
""" Replace the data of a chosen feature with a given list of new values
Args:
feature (str): selected feature
newValues (list(str)): New set of values to overwrite old data
Returns:
0 for success
"""
self.data[:,self._getFIdx(feature)] = newValues
return 0
def append(self, featureName, featureData):
""" Append the data with a new feature and list of new values
Args:
featureName (str): name of new feature to add
featureData (list(str)): New set of values to add
Returns:
0 for success
"""
self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)
self.featureNames = np.append(self.featureNames, featureName)
return 0
def select(self, features):
""" Select a set of features to retain (and remove other features)
Args:
features (list(str)): Selected set of features
Returns:
0 for success
"""
if 'Weather Type' not in features:
features.append('Weather Type')
self.data = self.data[:,[self._getFIdx(f) for f in features]]
self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]
return 0
def discard(self):
""" Discard observations with null data
Returns:
0 for success
"""
for f in self.featureNames:
self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']
return
def delete(self, feature):
""" Delete a feature and assoicated data
Args:
feature (str): name of feature to delete
Returns:
0 for success
"""
if (self._isFIdx(feature)):
self.data = np.delete(self.data, self._getFIdx(feature), axis=1)
self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))
return 0
def export(self, fname):
""" Export object to pickle file
Args:
fname (str): export file name
Returns:
0 for success
"""
# discard any data with null feature values
self.discard()
# set target as last column
self.target = self.getFeatureData('Weather Type')
# remove non-exportable features
for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:
if self._isFIdx(n):
self.delete(n)
# convert all data to float
self.data = self.data.astype(float)
# export to file
pickle.dump(self, open(fname, 'wb'))
return 0
## STATS UTILITIES
def getObservations(self, stationId='', obsDate='', obsTime='', features=[]):
""" Provide observation data for a chosen feature filtered by station, date, time
Args:
stationId (str): Station ID
obsDate (str): Observation date
obsTime (str): Observation time
features (list(str)): List of chosen features
Returns:
stats (list): List of observation data
"""
# filter data
stats = self.data
if (stationId):
stats = stats[stats[:,self._getFIdx('Station ID')] == stationId]
if (obsDate):
stats = stats[stats[:,self._getFIdx('Date')] == obsDate]
if (obsTime):
stats = stats[stats[:,self._getFIdx('Time since midnight')] == obsTime]
# return features
if (features):
features = [self._getFIdx(f) for f in features]
return stats[:,features]
else:
return stats
def findStations(self, coords=[], offset=[], minThreshold=10, maxThreshold=100):
""" Find the nearet observation station to a given location
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
minThreshold (int): Minimum acceptable distance from chosen location
maxThreshold (int): Maximum acceptable distance from chosen location
Returns:
stations (list): List of nearby stations
"""
nearStations = []
# check for supplied Latitude and Longitude
if not (coords[0] and coords[1]):
return 0
# calculate new coords with offset
if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to reduce array
Returns:
0 if success
"""
self.stationData = self.data[:,[self._getFIdx(f) for f in \
'Station ID', 'Station Name', 'Latitude', 'Longitude']]
# self.stationData = np.unique(self.stationData, axis=0)
self.stationData = self._unique_rows(self.stationData)
return 0
## PRIVATE DATA MANIPULATION METHODS
def _load(self):
""" Load data from file
Returns:
0 if success, -1 if file cannot be read
"""
# number of non-data header details at top of data file
header = 1
# open file
weatherData = []
with open(self.wfile) as myfile:
if (self.lines > 0):
weatherData = [next(myfile) for x in xrange(self.lines + header)]
else:
weatherData = myfile.readlines()
# get data stream from first line
streamHeader = weatherData.pop(0).rstrip()
if (streamHeader == 'FULL'):
self.dataStream = 0
elif (streamHeader == 'ADVANCED'):
|
elif (streamHeader == 'BASIC'):
self.dataStream = 2
else:
print "Error: unecognised data stream from file %s" % (self.wfile)
return -1
# read data
inputData = []
for line in weatherData:
entries = line.split()
inputData.append(entries)
# copy all into np array
self.data = np.array(inputData)
return 0
def _getFIdx(self, featureName):
""" Get Feature Index in data numpy array
Args:
featureName (str): Name of feature
Returns:
index
"""
return np.where(self.featureNames == featureName)[0][0]
def _isFIdx(self, featureName):
""" Look up if feature name is indexed in data numpy array
Args:
featureName (str): Name of feature
Returns:
1 if success, 0 if not found
"""
return 1 if (featureName in self.featureNames) else 0
## PRIVATE STATS UTILITIES
def _getDistance(self, source, dest):
""" Get the distance as crow flies between two coordinates
Args:
source (float): Longitude and Latitude of source point
source (float): Longitude and Latitude of destination point
Returns:
distance (float): distance betwen points
"""
lat1 = source[0]
lat2 = dest[0]
lon1 = source[1]
lon2 = dest[1]
# Formula from https://www.movable-type.co.uk/scripts/latlong.html
R = 6370000
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
deltaPhi = math.radians(lat2-lat1)
deltalmb = math.radians(lon2-lon1)
a = math.sin(deltaPhi/2) * math.sin(deltaPhi/2) + \
math.cos(phi1) * math.cos(phi2) * \
math.sin(deltalmb/2) * math.sin(deltalmb/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));
d = (R * c)/1000.
return d
def _getNewCoords(self, coords, offset):
""" Calculate new coordinates after applying offset
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
BUG?:
direction seems to be opposite from what I expect, made correction of 360-x
LIMITATION:
Due E (or W) gives slightly different results for latitude (e.g. 50N over 200km is 49.96N)
Returns:
coords (list(float, float)): New coordinates
"""
oldlat = math.radians(float(coords[0]))
oldlon = math.radians(float(coords[1]))
magnitude = float(offset[0]) / 6370.
direction = math.radians(360.-float(offset[1]))
# Calculate lat/lon given radial and distnace (http://www.edwilliams.org/avform.htm#LL)
lat = math.asin(math.sin(oldlat) * math.cos(magnitude) + math.cos(oldlat) \
* math.sin(magnitude) * math.cos(direction))
lon = (oldlon - math.asin(math.sin(direction) * math.sin(magnitude) / math.cos(lat)) \
+ math.pi) % (2 * math.pi) - math.pi
# print coords, offset, oldlat, oldlon, magnitude, direction, math.degrees(lat), math.degrees(lon)
return (math.degrees(lat), math.degrees(lon))
# Workaround on earlier numpy versions from https://github.com/numpy/numpy/issues/2871
def _unique_rows(self, A, return_index=False, return_inverse=False):
"""
Similar to MATLAB's unique(A, 'rows'), this returns B, I, J
where B is the unique rows of A and I and J satisfy
A = B[J,:] and B = A[I,:]
Returns I if return_index is True
Returns J if return_inverse is True
"""
A = np.require(A, requirements='C')
assert A.ndim == 2, "array must be 2-dim'l"
B = np.unique(A.view([('', A.dtype)]*A.shape[1]),
return_index=return_index,
return_inverse=return_inverse)
if return_index or return_inverse:
return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \
+ B[1:]
else:
return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')
def _getRelTime(self, obsData):
""" Calculate the time relative to set sample start time for a given data point
Args:
obsData (list): Observation data for single time point
Returns:
relTime (str): Time relative to set sample start time (hours)
"""
# get unix time for start of data sample (midnight) as ref point
dt = datetime.datetime(self.obsStart[0], self.obsStart[1], self.obsStart[2], 0, 0)
startOfDay = int(time.mktime(dt.timetuple()))
# strip date string
dateString = [x.strip() for x in obsData[self._getFIdx('Date')].split('-')]
# get unix time for start of observation date
obsStartOfDay = int(time.mktime(datetime.datetime( \
int(dateString[0]), int(dateString[1]), int(dateString[2]), 0, 0).timetuple()))
# calculate relative time (hours)
relTime = int((obsStartOfDay + (int(obsData[self._getFIdx('Time since midnight')])*60) \
- startOfDay)/3600.)
return str(relTime)
| self.dataStream = 1 | conditional_block |
Weather.py | import math
import pickle
import datetime, time
import numpy as np
class Weather:
def __init__(self, wfile, lines=0):
""" Weather Constructor
Args:
wfile (str): input weather file
lines (int, optional): number of lines of the input file to read
"""
self.wfile = wfile
self.lines = lines
# load data from file
isLoaded = self._load()
if (isLoaded == -1):
print 'Error reading weather data'
return -1
# add data descriptions
self._setTargetNames()
self._setFeatureNames()
self._setStationData()
# set member data defaults
self.obsStart = [2017,10,23]
return
## GET METHODS
def getNrEntries(self):
""" Get number of weather observations read from file
Returns:
Number of weather observations
"""
return len(self.data)
def getTargetNames(self):
""" Get target names
Returns:
Target names
"""
return self.targetNames
def getNrTargets(self):
""" Get number of targets
Returns:
Number of targets
"""
return self.targetNames.size
def getFeatures(self):
""" Get feature names
Returns:
Feature names
"""
return self.featureNames
def getNrFeatures(self):
""" Get number of features
Returns:
Number of features
"""
return self.featureNames.size
def getFeatureData(self, feature):
""" Get data for chosen feature
Args:
feature (str): selected feature
Returns:
Observation data of the selected feature (list)
"""
return self.data[:,self._getFIdx(feature)]
def getStationData(self, stationId):
""" Get data for chosen station
Args:
stationId (str): selected station
Returns:
Observation data of the selected station (list)
"""
if (stationId == 'all'):
return self.stationData
else:
station = np.where(self.stationData == stationId)[0][0]
return self.stationData[station]
def getNrStations(self):
""" Get number of observation stations
Returns:
Number of observation stations
"""
return len(self.stationData)
## DATA MANIPULATION METHODS
def modify(self, feature, newValues):
""" Replace the data of a chosen feature with a given list of new values
Args:
feature (str): selected feature
newValues (list(str)): New set of values to overwrite old data
Returns:
0 for success
"""
self.data[:,self._getFIdx(feature)] = newValues
return 0
def append(self, featureName, featureData):
""" Append the data with a new feature and list of new values
Args:
featureName (str): name of new feature to add
featureData (list(str)): New set of values to add
Returns:
0 for success
"""
self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)
self.featureNames = np.append(self.featureNames, featureName)
return 0
def select(self, features):
""" Select a set of features to retain (and remove other features)
Args:
features (list(str)): Selected set of features
Returns:
0 for success
"""
if 'Weather Type' not in features:
features.append('Weather Type')
self.data = self.data[:,[self._getFIdx(f) for f in features]]
self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]
return 0
def discard(self):
""" Discard observations with null data
Returns:
0 for success
"""
for f in self.featureNames:
self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']
return
def delete(self, feature):
""" Delete a feature and assoicated data
Args:
feature (str): name of feature to delete
Returns:
0 for success
"""
if (self._isFIdx(feature)):
self.data = np.delete(self.data, self._getFIdx(feature), axis=1)
self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))
return 0
def export(self, fname):
""" Export object to pickle file
Args:
fname (str): export file name
Returns:
0 for success
"""
# discard any data with null feature values
self.discard()
# set target as last column
self.target = self.getFeatureData('Weather Type')
# remove non-exportable features
for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:
if self._isFIdx(n):
self.delete(n)
# convert all data to float
self.data = self.data.astype(float)
# export to file
pickle.dump(self, open(fname, 'wb'))
return 0
## STATS UTILITIES
def getObservations(self, stationId='', obsDate='', obsTime='', features=[]):
""" Provide observation data for a chosen feature filtered by station, date, time
Args:
stationId (str): Station ID
obsDate (str): Observation date
obsTime (str): Observation time
features (list(str)): List of chosen features
Returns:
stats (list): List of observation data
"""
# filter data
stats = self.data
if (stationId):
stats = stats[stats[:,self._getFIdx('Station ID')] == stationId]
if (obsDate):
stats = stats[stats[:,self._getFIdx('Date')] == obsDate]
if (obsTime):
stats = stats[stats[:,self._getFIdx('Time since midnight')] == obsTime]
# return features
if (features): | return stats[:,features]
else:
return stats
def findStations(self, coords=[], offset=[], minThreshold=10, maxThreshold=100):
""" Find the nearet observation station to a given location
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
minThreshold (int): Minimum acceptable distance from chosen location
maxThreshold (int): Maximum acceptable distance from chosen location
Returns:
stations (list): List of nearby stations
"""
nearStations = []
# check for supplied Latitude and Longitude
if not (coords[0] and coords[1]):
return 0
# calculate new coords with offset
if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to reduce array
Returns:
0 if success
"""
self.stationData = self.data[:,[self._getFIdx(f) for f in \
'Station ID', 'Station Name', 'Latitude', 'Longitude']]
# self.stationData = np.unique(self.stationData, axis=0)
self.stationData = self._unique_rows(self.stationData)
return 0
## PRIVATE DATA MANIPULATION METHODS
def _load(self):
""" Load data from file
Returns:
0 if success, -1 if file cannot be read
"""
# number of non-data header details at top of data file
header = 1
# open file
weatherData = []
with open(self.wfile) as myfile:
if (self.lines > 0):
weatherData = [next(myfile) for x in xrange(self.lines + header)]
else:
weatherData = myfile.readlines()
# get data stream from first line
streamHeader = weatherData.pop(0).rstrip()
if (streamHeader == 'FULL'):
self.dataStream = 0
elif (streamHeader == 'ADVANCED'):
self.dataStream = 1
elif (streamHeader == 'BASIC'):
self.dataStream = 2
else:
print "Error: unecognised data stream from file %s" % (self.wfile)
return -1
# read data
inputData = []
for line in weatherData:
entries = line.split()
inputData.append(entries)
# copy all into np array
self.data = np.array(inputData)
return 0
def _getFIdx(self, featureName):
""" Get Feature Index in data numpy array
Args:
featureName (str): Name of feature
Returns:
index
"""
return np.where(self.featureNames == featureName)[0][0]
def _isFIdx(self, featureName):
""" Look up if feature name is indexed in data numpy array
Args:
featureName (str): Name of feature
Returns:
1 if success, 0 if not found
"""
return 1 if (featureName in self.featureNames) else 0
## PRIVATE STATS UTILITIES
def _getDistance(self, source, dest):
""" Get the distance as crow flies between two coordinates
Args:
source (float): Longitude and Latitude of source point
source (float): Longitude and Latitude of destination point
Returns:
distance (float): distance betwen points
"""
lat1 = source[0]
lat2 = dest[0]
lon1 = source[1]
lon2 = dest[1]
# Formula from https://www.movable-type.co.uk/scripts/latlong.html
R = 6370000
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
deltaPhi = math.radians(lat2-lat1)
deltalmb = math.radians(lon2-lon1)
a = math.sin(deltaPhi/2) * math.sin(deltaPhi/2) + \
math.cos(phi1) * math.cos(phi2) * \
math.sin(deltalmb/2) * math.sin(deltalmb/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));
d = (R * c)/1000.
return d
def _getNewCoords(self, coords, offset):
""" Calculate new coordinates after applying offset
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
BUG?:
direction seems to be opposite from what I expect, made correction of 360-x
LIMITATION:
Due E (or W) gives slightly different results for latitude (e.g. 50N over 200km is 49.96N)
Returns:
coords (list(float, float)): New coordinates
"""
oldlat = math.radians(float(coords[0]))
oldlon = math.radians(float(coords[1]))
magnitude = float(offset[0]) / 6370.
direction = math.radians(360.-float(offset[1]))
# Calculate lat/lon given radial and distnace (http://www.edwilliams.org/avform.htm#LL)
lat = math.asin(math.sin(oldlat) * math.cos(magnitude) + math.cos(oldlat) \
* math.sin(magnitude) * math.cos(direction))
lon = (oldlon - math.asin(math.sin(direction) * math.sin(magnitude) / math.cos(lat)) \
+ math.pi) % (2 * math.pi) - math.pi
# print coords, offset, oldlat, oldlon, magnitude, direction, math.degrees(lat), math.degrees(lon)
return (math.degrees(lat), math.degrees(lon))
# Workaround on earlier numpy versions from https://github.com/numpy/numpy/issues/2871
def _unique_rows(self, A, return_index=False, return_inverse=False):
"""
Similar to MATLAB's unique(A, 'rows'), this returns B, I, J
where B is the unique rows of A and I and J satisfy
A = B[J,:] and B = A[I,:]
Returns I if return_index is True
Returns J if return_inverse is True
"""
A = np.require(A, requirements='C')
assert A.ndim == 2, "array must be 2-dim'l"
B = np.unique(A.view([('', A.dtype)]*A.shape[1]),
return_index=return_index,
return_inverse=return_inverse)
if return_index or return_inverse:
return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \
+ B[1:]
else:
return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')
def _getRelTime(self, obsData):
""" Calculate the time relative to set sample start time for a given data point
Args:
obsData (list): Observation data for single time point
Returns:
relTime (str): Time relative to set sample start time (hours)
"""
# get unix time for start of data sample (midnight) as ref point
dt = datetime.datetime(self.obsStart[0], self.obsStart[1], self.obsStart[2], 0, 0)
startOfDay = int(time.mktime(dt.timetuple()))
# strip date string
dateString = [x.strip() for x in obsData[self._getFIdx('Date')].split('-')]
# get unix time for start of observation date
obsStartOfDay = int(time.mktime(datetime.datetime( \
int(dateString[0]), int(dateString[1]), int(dateString[2]), 0, 0).timetuple()))
# calculate relative time (hours)
relTime = int((obsStartOfDay + (int(obsData[self._getFIdx('Time since midnight')])*60) \
- startOfDay)/3600.)
return str(relTime) | features = [self._getFIdx(f) for f in features] | random_line_split |
TD_Functions.py | # coding=utf-8
# Copyright (C) 2012 Allis Tauri <allista@gmail.com>
#
# degen_primer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# indicator_gddccontrol is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jun 24, 2012
@author: Allis Tauri <allista@gmail.com>
All calculations are based on:
1) SantaLucia, J., & Hicks, D. (2004).
The thermodynamics of DNA structural motifs. Annual review of biophysics and
biomolecular structure, 33, 415-40. doi:10.1146/annurev.biophys.32.110601.141800
2) von Ahsen, N., Wittwer, C. T., & Schütz, E. (2001). Oligonucleotide melting
temperatures under PCR conditions: nearest-neighbor corrections for Mg(2+),
deoxynucleotide triphosphate, and dimethyl sulfoxide concentrations with
comparison to alternative empirical formulas. Clinical chemistry, 47(11), 1956-61.
'''
from math import sqrt, log
from UnifiedNN import *
from StringTools import print_exception
try:
from Bio.SeqFeature import SeqFeature, FeatureLocation
except Exception, e:
print_exception(e)
raise ImportError('The BioPython must be installed in your system.')
#utility functions
def print_exception(e):
print "Exception occurred: " + str(type(e)) + " : " + e.__str__()
###############################################################################
#standard PCR conditions
C_Mg = 1.5 #mM
C_Na = 50 #mM; should be above 0.05M and below 1.1M
C_dNTP = 0 #mM
C_DNA = 50 #nM; DNA template concentration
C_Prim = 0.1 #uM; Primer concentration
C_DMSO = 0 #percent
def C_Na_eq():
"""divalent cation correction (Ahsen et al., 2001)"""
global C_Na, C_Mg, C_dNTP
return C_Na + 120*sqrt(C_Mg - C_dNTP)
#end def
def NN_Tr(seq, r):
'''Calculate temperature for primer-template association equilibrium
with 'r' ratio using two-state equilibrium model and the Nearest Neighbor \
TD tables and from the paper of SantaLucia & Hicks (2004).
Note, that two-state equilibrium model used here is based on assumption, that
primer sequence is not self-complementary.'''
#value constraints
if r >=1 or r <=0:
raise ValueError('TD_Functions.NN_Tr: equilibrium ratio should be in the (0;1) interval.')
#definitions
global C_Prim, C_DNA, C_DMSO, R, K0, Sym_Correction
seq_str = str(seq)
rev_com = str(seq.reverse_complement())
seq_len = len(seq)
dH, dS = 0, 0
#concentrations
P = C_Prim*1e-6
D = C_DNA *1e-9
DUP = r*min(P,D)
#equilibrium constant
K = DUP/((P-DUP)*(D-DUP))
#initial corrections
dH += delta_H('ini', 'ini')
dS += delta_S('ini', 'ini')
#test for AT terminals
if seq_str[0] == 'A' or seq_str[0] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
#stacking interactions
for n in range(len(seq_str)-1):
NN = seq_str[n:n+2]
RC = rev_com[seq_len-n-2:seq_len-n]
dH += delta_H(NN, RC)
dS += delta_S(NN, RC)
#salt concentration correction
dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
#final temperature calculation
return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
def NN_Tm(seq): return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
feature = None
for f in seq_rec.features:
if f.type == 'source':
feature = f
break
if not feature:
feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
type = 'source')
seq_rec.features.append(feature)
return feature
#end def
def format_PCR_conditions():
conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
try:
feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
except Exception, e:
print 'add_PCR_conditions:'
print_exception(e)
#end def
def calculate_Tr(seq_rec, r):
primer_Tr = NN_Tr(seq_rec.seq, r)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['T-'+str(r)] = str(primer_Tr)
return primer_Tr
#end def
def calculate_Tm(seq_rec):
primer_Tm = NN_Tm(seq_rec.seq)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['Tm'] = str(primer_Tm)
return primer_Tm
#end def
def dimer_dG(dimer, seq1, seq2):
fwd_matches = list(dimer[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(dimer[1])
rev_matches.sort()
#e.g. (13,14,15,19,20)
seq_str = str(seq1)
seq_len = len(seq_str)
rev_str = str(seq2[::-1])
rev_len = len(rev_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] > 0:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == 0:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#check for 'right' dangling end
if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
#check for 'right' terminal mismatch
elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
dG += Terminal_mismatch_mean
#check for 'right' terminal AT
elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = rev_str[r_match:r_match+2]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: continue
#if |x| or |xx|
elif f_next-f_match < 4:
NN1 = rev_str[r_next-1:r_next+1][::-1]
RV1 = seq_str[f_next-1:f_next+1][::-1]
dG += MismatchNN[NN1][RV1] + dG_Na
continue
#loop
elif f_next-f_match < 31:
dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
else: pass
return dG
#end def
def hairpin_dG(hairpin, seq):
fwd_matches = list(hairpin[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(hairpin[1])
rev_matches.sort(reverse=True)
#e.g (24,23,22,18,17)
seq_str = str(seq)
seq_len = len(seq_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] < seq_len-1:
dG += DanglingNN['X'+seq_str[rev_matches[0]]][seq_str[rev_matches[0]+1]]
elif fwd_matches[0] > 0 and rev_matches[0] == seq_len-1:
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] < seq_len-1:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == seq_len-1:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = seq_str[r_match-1:r_match+1][::-1]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: conti | #if |x| or |xx|
elif f_next-f_match < 4:
NN1 = seq_str[r_next:r_next+2]
RV1 = seq_str[f_next-1:f_next+1][::-1]
dG += MismatchNN[NN1][RV1] + dG_Na
continue
#internal loop
elif f_next-f_match < 31:
dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
else: pass
#hairpin loop
hp_len = rev_matches[-1]-fwd_matches[-1]-1
dG += loop_dG(hp_len, 'H')
#3-4 loop
if hp_len < 5:
hp_str = seq_str[fwd_matches[-1]:rev_matches[-1]+1]
if hp_str in Tri_Tetra_Loops:
dG += Tri_Tetra_Loops[hp_str]
if hp_len == 3:
if seq_str[fwd_matches[-1]] == 'A' or seq_str[fwd_matches[-1]] == 'T':
dG += 0.5 #kcal/mol; AT-closing penalty
elif hp_len == 4:
dG += Terminal_mismatch_mean
else: dG += Terminal_mismatch_mean
return dG
#end def | nue
| conditional_block |
TD_Functions.py | # coding=utf-8
# Copyright (C) 2012 Allis Tauri <allista@gmail.com>
#
# degen_primer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# indicator_gddccontrol is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jun 24, 2012
@author: Allis Tauri <allista@gmail.com>
All calculations are based on:
1) SantaLucia, J., & Hicks, D. (2004).
The thermodynamics of DNA structural motifs. Annual review of biophysics and
biomolecular structure, 33, 415-40. doi:10.1146/annurev.biophys.32.110601.141800
2) von Ahsen, N., Wittwer, C. T., & Schütz, E. (2001). Oligonucleotide melting
temperatures under PCR conditions: nearest-neighbor corrections for Mg(2+),
deoxynucleotide triphosphate, and dimethyl sulfoxide concentrations with
comparison to alternative empirical formulas. Clinical chemistry, 47(11), 1956-61.
'''
from math import sqrt, log
from UnifiedNN import *
from StringTools import print_exception
try:
from Bio.SeqFeature import SeqFeature, FeatureLocation
except Exception, e:
print_exception(e)
raise ImportError('The BioPython must be installed in your system.')
#utility functions
def print_exception(e):
print "Exception occurred: " + str(type(e)) + " : " + e.__str__()
###############################################################################
#standard PCR conditions
C_Mg = 1.5 #mM
C_Na = 50 #mM; should be above 0.05M and below 1.1M
C_dNTP = 0 #mM
C_DNA = 50 #nM; DNA template concentration
C_Prim = 0.1 #uM; Primer concentration
C_DMSO = 0 #percent
def C_Na_eq():
"""divalent cation correction (Ahsen et al., 2001)"""
global C_Na, C_Mg, C_dNTP
return C_Na + 120*sqrt(C_Mg - C_dNTP)
#end def
def NN_Tr(seq, r):
'''Calculate temperature for primer-template association equilibrium
with 'r' ratio using two-state equilibrium model and the Nearest Neighbor \
TD tables and from the paper of SantaLucia & Hicks (2004).
Note, that two-state equilibrium model used here is based on assumption, that
primer sequence is not self-complementary.'''
#value constraints
if r >=1 or r <=0:
raise ValueError('TD_Functions.NN_Tr: equilibrium ratio should be in the (0;1) interval.')
#definitions
global C_Prim, C_DNA, C_DMSO, R, K0, Sym_Correction
seq_str = str(seq)
rev_com = str(seq.reverse_complement())
seq_len = len(seq)
dH, dS = 0, 0
#concentrations
P = C_Prim*1e-6
D = C_DNA *1e-9
DUP = r*min(P,D)
#equilibrium constant
K = DUP/((P-DUP)*(D-DUP))
#initial corrections
dH += delta_H('ini', 'ini')
dS += delta_S('ini', 'ini')
#test for AT terminals
if seq_str[0] == 'A' or seq_str[0] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
#stacking interactions
for n in range(len(seq_str)-1):
NN = seq_str[n:n+2]
RC = rev_com[seq_len-n-2:seq_len-n]
dH += delta_H(NN, RC)
dS += delta_S(NN, RC)
#salt concentration correction
dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
#final temperature calculation
return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
def NN_Tm(seq): return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
feature = None
for f in seq_rec.features:
if f.type == 'source':
feature = f
break
if not feature:
feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
type = 'source')
seq_rec.features.append(feature)
return feature
#end def
def format_PCR_conditions():
conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
try:
feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
except Exception, e:
print 'add_PCR_conditions:'
print_exception(e)
#end def
def calculate_Tr(seq_rec, r):
primer_Tr = NN_Tr(seq_rec.seq, r)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['T-'+str(r)] = str(primer_Tr)
return primer_Tr
#end def
def calculate_Tm(seq_rec):
primer_Tm = NN_Tm(seq_rec.seq)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['Tm'] = str(primer_Tm)
return primer_Tm
#end def
def dimer_dG(dimer, seq1, seq2):
fwd_matches = list(dimer[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(dimer[1])
rev_matches.sort()
#e.g. (13,14,15,19,20)
seq_str = str(seq1)
seq_len = len(seq_str)
rev_str = str(seq2[::-1])
rev_len = len(rev_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] > 0:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == 0:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#check for 'right' dangling end
if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
#check for 'right' terminal mismatch
elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
dG += Terminal_mismatch_mean
#check for 'right' terminal AT
elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = rev_str[r_match:r_match+2]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: continue
#if |x| or |xx|
elif f_next-f_match < 4:
NN1 = rev_str[r_next-1:r_next+1][::-1]
RV1 = seq_str[f_next-1:f_next+1][::-1]
dG += MismatchNN[NN1][RV1] + dG_Na
continue
#loop
elif f_next-f_match < 31:
dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
else: pass
return dG
#end def
def hairpin_dG(hairpin, seq):
fwd_matches = list(hairpin[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(hairpin[1])
rev_matches.sort(reverse=True)
#e.g (24,23,22,18,17)
seq_str = str(seq)
seq_len = len(seq_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] < seq_len-1:
dG += DanglingNN['X'+seq_str[rev_matches[0]]][seq_str[rev_matches[0]+1]] | elif fwd_matches[0] > 0 and rev_matches[0] == seq_len-1:
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] < seq_len-1:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == seq_len-1:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = seq_str[r_match-1:r_match+1][::-1]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: continue
#if |x| or |xx|
elif f_next-f_match < 4:
NN1 = seq_str[r_next:r_next+2]
RV1 = seq_str[f_next-1:f_next+1][::-1]
dG += MismatchNN[NN1][RV1] + dG_Na
continue
#internal loop
elif f_next-f_match < 31:
dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
else: pass
#hairpin loop
hp_len = rev_matches[-1]-fwd_matches[-1]-1
dG += loop_dG(hp_len, 'H')
#3-4 loop
if hp_len < 5:
hp_str = seq_str[fwd_matches[-1]:rev_matches[-1]+1]
if hp_str in Tri_Tetra_Loops:
dG += Tri_Tetra_Loops[hp_str]
if hp_len == 3:
if seq_str[fwd_matches[-1]] == 'A' or seq_str[fwd_matches[-1]] == 'T':
dG += 0.5 #kcal/mol; AT-closing penalty
elif hp_len == 4:
dG += Terminal_mismatch_mean
else: dG += Terminal_mismatch_mean
return dG
#end def | random_line_split | |
TD_Functions.py | # coding=utf-8
# Copyright (C) 2012 Allis Tauri <allista@gmail.com>
#
# degen_primer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# indicator_gddccontrol is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jun 24, 2012
@author: Allis Tauri <allista@gmail.com>
All calculations are based on:
1) SantaLucia, J., & Hicks, D. (2004).
The thermodynamics of DNA structural motifs. Annual review of biophysics and
biomolecular structure, 33, 415-40. doi:10.1146/annurev.biophys.32.110601.141800
2) von Ahsen, N., Wittwer, C. T., & Schütz, E. (2001). Oligonucleotide melting
temperatures under PCR conditions: nearest-neighbor corrections for Mg(2+),
deoxynucleotide triphosphate, and dimethyl sulfoxide concentrations with
comparison to alternative empirical formulas. Clinical chemistry, 47(11), 1956-61.
'''
from math import sqrt, log
from UnifiedNN import *
from StringTools import print_exception
try:
from Bio.SeqFeature import SeqFeature, FeatureLocation
except Exception, e:
print_exception(e)
raise ImportError('The BioPython must be installed in your system.')
#utility functions
def print_exception(e):
print "Exception occurred: " + str(type(e)) + " : " + e.__str__()
###############################################################################
#standard PCR conditions
C_Mg = 1.5 #mM
C_Na = 50 #mM; should be above 0.05M and below 1.1M
C_dNTP = 0 #mM
C_DNA = 50 #nM; DNA template concentration
C_Prim = 0.1 #uM; Primer concentration
C_DMSO = 0 #percent
def C_Na_eq():
"""divalent cation correction (Ahsen et al., 2001)"""
global C_Na, C_Mg, C_dNTP
return C_Na + 120*sqrt(C_Mg - C_dNTP)
#end def
def NN_Tr(seq, r):
'''Calculate temperature for primer-template association equilibrium
with 'r' ratio using two-state equilibrium model and the Nearest Neighbor \
TD tables and from the paper of SantaLucia & Hicks (2004).
Note, that two-state equilibrium model used here is based on assumption, that
primer sequence is not self-complementary.'''
#value constraints
if r >=1 or r <=0:
raise ValueError('TD_Functions.NN_Tr: equilibrium ratio should be in the (0;1) interval.')
#definitions
global C_Prim, C_DNA, C_DMSO, R, K0, Sym_Correction
seq_str = str(seq)
rev_com = str(seq.reverse_complement())
seq_len = len(seq)
dH, dS = 0, 0
#concentrations
P = C_Prim*1e-6
D = C_DNA *1e-9
DUP = r*min(P,D)
#equilibrium constant
K = DUP/((P-DUP)*(D-DUP))
#initial corrections
dH += delta_H('ini', 'ini')
dS += delta_S('ini', 'ini')
#test for AT terminals
if seq_str[0] == 'A' or seq_str[0] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
#stacking interactions
for n in range(len(seq_str)-1):
NN = seq_str[n:n+2]
RC = rev_com[seq_len-n-2:seq_len-n]
dH += delta_H(NN, RC)
dS += delta_S(NN, RC)
#salt concentration correction
dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
#final temperature calculation
return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
def NN_Tm(seq): return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
feature = None
for f in seq_rec.features:
if f.type == 'source':
feature = f
break
if not feature:
feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
type = 'source')
seq_rec.features.append(feature)
return feature
#end def
def format_PCR_conditions():
conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
try:
| def
def calculate_Tr(seq_rec, r):
primer_Tr = NN_Tr(seq_rec.seq, r)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['T-'+str(r)] = str(primer_Tr)
return primer_Tr
#end def
def calculate_Tm(seq_rec):
primer_Tm = NN_Tm(seq_rec.seq)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['Tm'] = str(primer_Tm)
return primer_Tm
#end def
def dimer_dG(dimer, seq1, seq2):
fwd_matches = list(dimer[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(dimer[1])
rev_matches.sort()
#e.g. (13,14,15,19,20)
seq_str = str(seq1)
seq_len = len(seq_str)
rev_str = str(seq2[::-1])
rev_len = len(rev_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] > 0:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == 0:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#check for 'right' dangling end
if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
#check for 'right' terminal mismatch
elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
dG += Terminal_mismatch_mean
#check for 'right' terminal AT
elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = rev_str[r_match:r_match+2]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: continue
#if |x| or |xx|
elif f_next-f_match < 4:
NN1 = rev_str[r_next-1:r_next+1][::-1]
RV1 = seq_str[f_next-1:f_next+1][::-1]
dG += MismatchNN[NN1][RV1] + dG_Na
continue
#loop
elif f_next-f_match < 31:
dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
else: pass
return dG
#end def
def hairpin_dG(hairpin, seq):
    """Return the salt-corrected free energy (dG, kcal/mol) of a hairpin
    formed within `seq`, given the paired positions.

    hairpin -- pair of iterables: hairpin[0] are positions of the 5' arm,
               hairpin[1] the corresponding paired positions of the 3' arm
               (same length).
    seq     -- sequence object convertible by str() to an A/T/G/C string.
    """
    fwd_matches = list(hairpin[0])
    fwd_matches.sort()
    #e.g. (2 ,3 ,4 ,8 ,9 )
    rev_matches = list(hairpin[1])
    rev_matches.sort(reverse=True)
    #e.g (24,23,22,18,17)
    seq_str = str(seq)
    seq_len = len(seq_str)
    #salt correction applied per stacking step (von Ahsen et al., 2001)
    dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
    #duplex initiation term
    dG = delta_G('ini', 'ini')
    #check for 'left' dangling end
    if fwd_matches[0] == 0 and rev_matches[0] < seq_len-1:
        dG += DanglingNN['X'+seq_str[rev_matches[0]]][seq_str[rev_matches[0]+1]]
    elif fwd_matches[0] > 0 and rev_matches[0] == seq_len-1:
        dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
    #check for 'left' terminal mismatch
    elif fwd_matches[0] > 0 and rev_matches[0] < seq_len-1:
        dG += Terminal_mismatch_mean
    #check for 'left' terminal AT
    elif fwd_matches[0] == 0 and rev_matches[0] == seq_len-1:
        if seq_str[0] == 'A' or seq_str[0] == 'T':
            dG += delta_G('ter', 'ter')
    #stacking and mismatches
    for i in range(len(fwd_matches)-1):
        f_match = fwd_matches[i]
        f_next = fwd_matches[i+1]
        r_match = rev_matches[i]
        r_next = rev_matches[i+1]
        #if either || or |x| or |xx| (0, 1 or 2 unpaired bases between matches)
        if f_next-f_match < 4:
            NN = seq_str[f_match:f_match+2]
            RV = seq_str[r_match-1:r_match+1][::-1]
            #salt-corrected dG
            dG += MismatchNN[NN][RV] + dG_Na
            #if || (adjacent matches: a single stacking step)
            if f_next-f_match == 1: continue
            #if |x| or |xx| (also add the step closing the internal mismatch)
            elif f_next-f_match < 4:
                NN1 = seq_str[r_next:r_next+2]
                RV1 = seq_str[f_next-1:f_next+1][::-1]
                dG += MismatchNN[NN1][RV1] + dG_Na
                continue
        #internal loop (3..30 unpaired bases)
        elif f_next-f_match < 31:
            dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
        else: pass #gaps longer than 30 bases contribute nothing
    #hairpin loop
    hp_len = rev_matches[-1]-fwd_matches[-1]-1
    dG += loop_dG(hp_len, 'H')
    #3-4 loop: tabulated tri-/tetra-loop bonuses
    if hp_len < 5:
        hp_str = seq_str[fwd_matches[-1]:rev_matches[-1]+1]
        if hp_str in Tri_Tetra_Loops:
            dG += Tri_Tetra_Loops[hp_str]
        if hp_len == 3:
            if seq_str[fwd_matches[-1]] == 'A' or seq_str[fwd_matches[-1]] == 'T':
                dG += 0.5 #kcal/mol; AT-closing penalty
        elif hp_len == 4:
            dG += Terminal_mismatch_mean
    else: dG += Terminal_mismatch_mean #loops of 5+ get one terminal-mismatch term
    return dG
#end def | feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
except Exception, e:
print 'add_PCR_conditions:'
print_exception(e)
#end | identifier_body |
TD_Functions.py | # coding=utf-8
# Copyright (C) 2012 Allis Tauri <allista@gmail.com>
#
# degen_primer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# degen_primer is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jun 24, 2012
@author: Allis Tauri <allista@gmail.com>
All calculations are based on:
1) SantaLucia, J., & Hicks, D. (2004).
The thermodynamics of DNA structural motifs. Annual review of biophysics and
biomolecular structure, 33, 415-40. doi:10.1146/annurev.biophys.32.110601.141800
2) von Ahsen, N., Wittwer, C. T., & Schütz, E. (2001). Oligonucleotide melting
temperatures under PCR conditions: nearest-neighbor corrections for Mg(2+),
deoxynucleotide triphosphate, and dimethyl sulfoxide concentrations with
comparison to alternative empirical formulas. Clinical chemistry, 47(11), 1956-61.
'''
from math import sqrt, log
from UnifiedNN import *
from StringTools import print_exception
try:
from Bio.SeqFeature import SeqFeature, FeatureLocation
except Exception, e:
print_exception(e)
raise ImportError('The BioPython must be installed in your system.')
#utility functions
def print_exception(e):
    """Print a one-line description of the exception `e` to stdout.

    NOTE(review): this redefines the print_exception imported from
    StringTools above -- confirm the shadowing is intentional.
    """
    print "Exception occurred: " + str(type(e)) + " : " + e.__str__()
###############################################################################
#standard PCR conditions
C_Mg = 1.5 #mM
C_Na = 50 #mM; should be above 0.05M and below 1.1M
C_dNTP = 0 #mM
C_DNA = 50 #nM; DNA template concentration
C_Prim = 0.1 #uM; Primer concentration
C_DMSO = 0 #percent
def C_Na_eq():
    """Monovalent-equivalent salt concentration in mM.

    Applies the divalent-cation correction of von Ahsen et al. (2001):
    [Na+]eq = [Na+] + 120*sqrt([Mg2+] - [dNTP]).
    """
    global C_Na, C_Mg, C_dNTP
    divalent_correction = 120 * sqrt(C_Mg - C_dNTP)
    return C_Na + divalent_correction
#end def
def NN_Tr(seq, r):
    '''Calculate temperature for primer-template association equilibrium
    with 'r' ratio using two-state equilibrium model and the Nearest Neighbor
    TD tables from the paper of SantaLucia & Hicks (2004).

    seq -- sequence object providing str() and reverse_complement()
    r   -- fraction of duplexes formed at equilibrium; must be in (0;1)

    Raises ValueError if r is outside the (0;1) interval.
    Note, that two-state equilibrium model used here is based on assumption, that
    primer sequence is not self-complementary.'''
    #value constraints
    if r >=1 or r <=0:
        raise ValueError('TD_Functions.NN_Tr: equilibrium ratio should be in the (0;1) interval.')
    #definitions
    global C_Prim, C_DNA, C_DMSO, R, K0, Sym_Correction
    seq_str = str(seq)
    rev_com = str(seq.reverse_complement())
    seq_len = len(seq)
    dH, dS = 0, 0
    #concentrations converted to mol/L (C_Prim is uM, C_DNA is nM)
    P = C_Prim*1e-6
    D = C_DNA *1e-9
    DUP = r*min(P,D)
    #equilibrium constant of the two-state duplex model
    K = DUP/((P-DUP)*(D-DUP))
    #initial corrections
    dH += delta_H('ini', 'ini')
    dS += delta_S('ini', 'ini')
    #test for AT terminals (terminal AT penalty)
    if seq_str[0] == 'A' or seq_str[0] == 'T':
        dH += delta_H('ter', 'ter')
        dS += delta_S('ter', 'ter')
    if seq_str[-1] == 'A' or seq_str[-1] == 'T':
        dH += delta_H('ter', 'ter')
        dS += delta_S('ter', 'ter')
    #stacking interactions: sum NN contributions along the duplex
    for n in range(len(seq_str)-1):
        NN = seq_str[n:n+2]
        RC = rev_com[seq_len-n-2:seq_len-n]
        dH += delta_H(NN, RC)
        dS += delta_S(NN, RC)
    #salt concentration correction
    dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
    #final temperature calculation; dH appears to be in kcal/mol (hence *1000);
    #K0 is presumably the 0C offset in Kelvin (defined in UnifiedNN) -- confirm
    return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
def NN_Tm(seq):
    """Melting temperature of `seq`: the association temperature at which
    half of the possible duplexes are formed (r = 0.5)."""
    return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
    """Return the 'source' feature of the given SeqRecord.

    If the record has no 'source' feature, a new one spanning the whole
    sequence is created, appended to the record and returned.
    """
    for existing in seq_rec.features:
        if existing.type == 'source':
            return existing
    new_feature = SeqFeature(FeatureLocation(0, len(seq_rec.seq)),
                             type='source')
    seq_rec.features.append(new_feature)
    return new_feature
#end def
def forma | conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
    """Record the current global PCR conditions (salt, dNTP, template,
    primer and DMSO concentrations) as qualifiers of `feature`."""
    try:
        feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
        feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
        feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
        feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
        feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
        feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
    except Exception, e:
        #best effort: a feature that cannot take qualifiers is just reported
        print 'add_PCR_conditions:'
        print_exception(e)
#end def
def calculate_Tr(seq_rec, r):
    """Compute the association temperature of seq_rec.seq at equilibrium
    ratio `r`, record it (together with the PCR conditions used) on the
    record's source feature as a 'T-<r>' qualifier and return it."""
    temperature = NN_Tr(seq_rec.seq, r)
    annotated = source_feature(seq_rec)
    add_PCR_conditions(annotated)
    annotated.qualifiers['T-' + str(r)] = str(temperature)
    return temperature
#end def
def calculate_Tm(seq_rec):
    """Compute the melting temperature of seq_rec.seq, record it (together
    with the PCR conditions used) on the record's source feature as a 'Tm'
    qualifier and return it."""
    temperature = NN_Tm(seq_rec.seq)
    annotated = source_feature(seq_rec)
    add_PCR_conditions(annotated)
    annotated.qualifiers['Tm'] = str(temperature)
    return temperature
#end def
def dimer_dG(dimer, seq1, seq2):
    """Return the salt-corrected free energy (dG, kcal/mol) of a duplex
    (dimer) between seq1 and seq2, given the matched position sets.

    dimer -- pair of iterables: dimer[0] are matched positions in seq1
             (5'->3'), dimer[1] the corresponding positions counted on the
             reversed seq2 (same length).
    """
    fwd_matches = list(dimer[0])
    fwd_matches.sort()
    #e.g. (2 ,3 ,4 ,8 ,9 )
    rev_matches = list(dimer[1])
    rev_matches.sort()
    #e.g. (13,14,15,19,20)
    seq_str = str(seq1)
    seq_len = len(seq_str)
    rev_str = str(seq2[::-1])
    rev_len = len(rev_str)
    #salt correction applied per stacking step (von Ahsen et al., 2001)
    dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
    #duplex initiation term
    dG = delta_G('ini', 'ini')
    #check for 'left' dangling end
    if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
        dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
    elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
        dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
    #check for 'left' terminal mismatch
    elif fwd_matches[0] > 0 and rev_matches[0] > 0:
        dG += Terminal_mismatch_mean
    #check for 'left' terminal AT
    elif fwd_matches[0] == 0 and rev_matches[0] == 0:
        if seq_str[0] == 'A' or seq_str[0] == 'T':
            dG += delta_G('ter', 'ter')
    #check for 'right' dangling end
    if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
        dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
    elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
        dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
    #check for 'right' terminal mismatch
    #NOTE(review): rev_matches[0] below is asymmetric with the other
    #'right'-end branches, which all use rev_matches[-1]; possibly a bug --
    #confirm against the intended alignment geometry.
    elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
        dG += Terminal_mismatch_mean
    #check for 'right' terminal AT
    elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
        if seq_str[-1] == 'A' or seq_str[-1] == 'T':
            dG += delta_G('ter', 'ter')
    #stacking and mismatches
    for i in range(len(fwd_matches)-1):
        f_match = fwd_matches[i]
        f_next = fwd_matches[i+1]
        r_match = rev_matches[i]
        r_next = rev_matches[i+1]
        #if either || or |x| or |xx| (0, 1 or 2 unpaired bases between matches)
        if f_next-f_match < 4:
            NN = seq_str[f_match:f_match+2]
            RV = rev_str[r_match:r_match+2]
            #salt-corrected dG
            dG += MismatchNN[NN][RV] + dG_Na
            #if || (adjacent matches: a single stacking step)
            if f_next-f_match == 1: continue
            #if |x| or |xx| (also add the step closing the internal mismatch)
            elif f_next-f_match < 4:
                NN1 = rev_str[r_next-1:r_next+1][::-1]
                RV1 = seq_str[f_next-1:f_next+1][::-1]
                dG += MismatchNN[NN1][RV1] + dG_Na
                continue
        #loop (3..30 unpaired bases): internal loop penalty
        elif f_next-f_match < 31:
            dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
        else: pass #gaps longer than 30 bases contribute nothing
    return dG
#end def
def hairpin_dG(hairpin, seq):
    """Return the salt-corrected free energy (dG, kcal/mol) of a hairpin
    formed within `seq`, given the paired positions.

    hairpin -- pair of iterables: hairpin[0] are positions of the 5' arm,
               hairpin[1] the corresponding paired positions of the 3' arm
               (same length).
    seq     -- sequence object convertible by str() to an A/T/G/C string.
    """
    fwd_matches = list(hairpin[0])
    fwd_matches.sort()
    #e.g. (2 ,3 ,4 ,8 ,9 )
    rev_matches = list(hairpin[1])
    rev_matches.sort(reverse=True)
    #e.g (24,23,22,18,17)
    seq_str = str(seq)
    seq_len = len(seq_str)
    #salt correction applied per stacking step (von Ahsen et al., 2001)
    dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
    #duplex initiation term
    dG = delta_G('ini', 'ini')
    #check for 'left' dangling end
    if fwd_matches[0] == 0 and rev_matches[0] < seq_len-1:
        dG += DanglingNN['X'+seq_str[rev_matches[0]]][seq_str[rev_matches[0]+1]]
    elif fwd_matches[0] > 0 and rev_matches[0] == seq_len-1:
        dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
    #check for 'left' terminal mismatch
    elif fwd_matches[0] > 0 and rev_matches[0] < seq_len-1:
        dG += Terminal_mismatch_mean
    #check for 'left' terminal AT
    elif fwd_matches[0] == 0 and rev_matches[0] == seq_len-1:
        if seq_str[0] == 'A' or seq_str[0] == 'T':
            dG += delta_G('ter', 'ter')
    #stacking and mismatches
    for i in range(len(fwd_matches)-1):
        f_match = fwd_matches[i]
        f_next = fwd_matches[i+1]
        r_match = rev_matches[i]
        r_next = rev_matches[i+1]
        #if either || or |x| or |xx| (0, 1 or 2 unpaired bases between matches)
        if f_next-f_match < 4:
            NN = seq_str[f_match:f_match+2]
            RV = seq_str[r_match-1:r_match+1][::-1]
            #salt-corrected dG
            dG += MismatchNN[NN][RV] + dG_Na
            #if || (adjacent matches: a single stacking step)
            if f_next-f_match == 1: continue
            #if |x| or |xx| (also add the step closing the internal mismatch)
            elif f_next-f_match < 4:
                NN1 = seq_str[r_next:r_next+2]
                RV1 = seq_str[f_next-1:f_next+1][::-1]
                dG += MismatchNN[NN1][RV1] + dG_Na
                continue
        #internal loop (3..30 unpaired bases)
        elif f_next-f_match < 31:
            dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
        else: pass #gaps longer than 30 bases contribute nothing
    #hairpin loop
    hp_len = rev_matches[-1]-fwd_matches[-1]-1
    dG += loop_dG(hp_len, 'H')
    #3-4 loop: tabulated tri-/tetra-loop bonuses
    if hp_len < 5:
        hp_str = seq_str[fwd_matches[-1]:rev_matches[-1]+1]
        if hp_str in Tri_Tetra_Loops:
            dG += Tri_Tetra_Loops[hp_str]
        if hp_len == 3:
            if seq_str[fwd_matches[-1]] == 'A' or seq_str[fwd_matches[-1]] == 'T':
                dG += 0.5 #kcal/mol; AT-closing penalty
        elif hp_len == 4:
            dG += Terminal_mismatch_mean
    else: dG += Terminal_mismatch_mean #loops of 5+ get one terminal-mismatch term
    return dG
#end def | t_PCR_conditions():
| identifier_name |
upload.rs | //! The uploading logic was mostly reverse engineered; I wrote it down as
//! documentation at https://warehouse.readthedocs.io/api-reference/legacy/#upload-api
use crate::build_context::hash_file;
use anyhow::{bail, Context, Result};
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use bytesize::ByteSize;
use configparser::ini::Ini;
use fs_err as fs;
use fs_err::File;
use multipart::client::lazy::Multipart;
use regex::Regex;
use serde::Deserialize;
use std::collections::HashMap;
use std::env;
#[cfg(any(feature = "native-tls", feature = "rustls"))]
use std::ffi::OsString;
use std::io;
use std::path::{Path, PathBuf};
use std::time::Duration;
use thiserror::Error;
use tracing::debug;
/// An account with a registry, possibly incomplete
///
/// Every option can also be provided through the MATURIN_* environment
/// variable named on it; missing credentials are resolved later (pypirc,
/// OIDC, keyring or interactive prompt).
#[derive(Debug, clap::Parser)]
pub struct PublishOpt {
    /// The repository (package index) to upload the package to. Should be a section in the config file.
    ///
    /// Can also be set via MATURIN_REPOSITORY environment variable.
    #[arg(short = 'r', long, env = "MATURIN_REPOSITORY", default_value = "pypi")]
    repository: String,
    /// The URL of the registry where the wheels are uploaded to. This overrides --repository.
    ///
    /// Can also be set via MATURIN_REPOSITORY_URL environment variable.
    #[arg(long, env = "MATURIN_REPOSITORY_URL", overrides_with = "repository")]
    repository_url: Option<String>,
    /// Username for pypi or your custom registry.
    ///
    /// Can also be set via MATURIN_USERNAME environment variable.
    ///
    /// Set MATURIN_PYPI_TOKEN variable to use token-based authentication instead
    #[arg(short, long, env = "MATURIN_USERNAME")]
    username: Option<String>,
    /// Password for pypi or your custom registry.
    ///
    /// Can also be set via MATURIN_PASSWORD environment variable.
    #[arg(short, long, env = "MATURIN_PASSWORD", hide_env_values = true)]
    password: Option<String>,
    /// Continue uploading files if one already exists.
    /// (Only valid when uploading to PyPI. Other implementations may not support this.)
    #[arg(long)]
    skip_existing: bool,
    /// Do not interactively prompt for username/password if the required credentials are missing.
    ///
    /// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
    #[arg(long, env = "MATURIN_NON_INTERACTIVE")]
    non_interactive: bool,
}
impl PublishOpt {
    const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
    const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";

    /// Switch to non-interactive mode when running on CI (i.e. `CI=true`),
    /// unless non-interactive mode was already requested.
    pub fn non_interactive_on_ci(&mut self) {
        let on_ci = matches!(env::var("CI").as_deref(), Ok("true"));
        if on_ci && !self.non_interactive {
            eprintln!("🎛️ Running in non-interactive mode on CI");
            self.non_interactive = true;
        }
    }
}
/// Error type for different types of errors that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
    /// Any ureq error
    #[error("Http error")]
    UreqError(#[source] Box<ureq::Error>),
    /// The registry returned a "403 Forbidden"; carries the response body
    #[error("Username or password are incorrect")]
    AuthenticationError(String),
    /// Reading the wheel failed
    #[error("IO Error")]
    IoError(#[source] io::Error),
    /// The registry returned something else than 200; carries (status, body)
    #[error("Failed to upload the wheel with status {0}: {1}")]
    StatusCodeError(String, String),
    /// File already exists
    #[error("File already exists: {0}")]
    FileExistsError(String),
    /// Read package metadata error
    #[error("Could not read the metadata from the package at {0}")]
    PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
    /// TLS error
    #[cfg(feature = "native-tls")]
    #[error("TLS Error")]
    TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
    /// Box the (large) ureq error to keep `Result<_, UploadError>` small.
    fn from(error: ureq::Error) -> Self {
        Self::UreqError(Box::new(error))
    }
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
    /// Wrap a TLS setup failure (native-tls backend only).
    fn from(error: native_tls::Error) -> Self {
        Self::TlsError(error)
    }
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
    /// The username used for basic authentication
    pub username: String,
    /// The password used for basic authentication
    pub password: String,
    /// The url endpoint for legacy uploading
    pub url: String,
}

impl Registry {
    /// Bundle credentials with the legacy-upload endpoint they belong to.
    pub fn new(username: String, password: String, url: String) -> Registry {
        Registry { username, password, url }
    }
}
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
    #[cfg(feature = "keyring")]
    {
        // Look the password up with this crate's name as the service id.
        let service = env!("CARGO_PKG_NAME");
        let keyring = keyring::Entry::new(service, _username);
        if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
            return password;
        };
    }

    dialoguer::Password::new()
        .with_prompt("Please enter your password")
        .interact()
        .unwrap_or_else(|_| {
            // So we need this fallback for pycharm on windows
            let mut password = String::new();
            io::stdin()
                .read_line(&mut password)
                .expect("Failed to read line");
            password.trim().to_string()
        })
}
/// Prompt on stderr and read a username from standard input,
/// trimmed of surrounding whitespace.
fn get_username() -> String {
    eprintln!("Please enter your username:");
    let mut buffer = String::new();
    io::stdin().read_line(&mut buffer).unwrap();
    buffer.trim().to_string()
}
/// Parse `~/.pypirc`, returning an empty config when the file is missing
/// or the home directory cannot be determined.
fn load_pypirc() -> Ini {
    let mut config = Ini::new();
    if let Some(mut config_path) = dirs::home_dir() {
        config_path.push(".pypirc");
        if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
            // Best effort: a malformed .pypirc simply leaves `config` empty.
            let _ = config.read(pypirc);
        }
    }
    config
}
/// Fetch the username/password pair for `registry_name` from a parsed
/// .pypirc, returning `None` unless both entries are present.
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
    let username = config.get(registry_name, "username")?;
    let password = config.get(registry_name, "password")?;
    Some((username, password))
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. an API token minted via GitHub Actions OIDC (trusted publishing)
/// 3. `.pypirc` config file
/// 4. maturin command arguments
/// 5. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 6. the password keyring
/// 7. interactive prompt
fn resolve_pypi_cred(
    opt: &PublishOpt,
    config: &Ini,
    registry_name: Option<&str>,
    registry_url: &str,
) -> Result<(String, String)> {
    // API token from environment variable takes priority
    if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
        return Ok(("__token__".to_string(), token));
    }

    // Try to get a token via OIDC exchange
    match resolve_pypi_token_via_oidc(registry_url) {
        Ok(Some(token)) => {
            eprintln!("🔐 Using trusted publisher for upload");
            return Ok(("__token__".to_string(), token));
        }
        Ok(None) => {}
        // OIDC failure is not fatal: fall through to the other sources.
        Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
    }

    if let Some((username, password)) =
        registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
    {
        eprintln!("🔐 Using credential in pypirc for upload");
        return Ok((username, password));
    }

    // fallback to username and password
    if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
        bail!("Credentials not found and non-interactive mode is enabled");
    }
    let username = opt.username.clone().unwrap_or_else(get_username);
    let password = opt
        .password
        .clone()
        .unwrap_or_else(|| get_password(&username));

    Ok((username, password))
}
/// Response of the registry's `_/oidc/audience` endpoint.
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
    audience: String,
}

/// Response of the GitHub Actions OIDC token request endpoint.
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
    value: String,
}

/// Response of the registry's `_/oidc/github/mint-token` endpoint.
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
    token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
///
/// Resolves the target registry (pypirc section name and upload URL) from
/// --repository-url, .pypirc, or the built-in pypi/testpypi defaults, then
/// resolves credentials for it.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
    // load creds from pypirc if found
    let pypirc = load_pypirc();
    let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
    {
        // Map the well-known URLs back to their pypirc section names so
        // credentials stored there can still be used.
        let name = match repository_url {
            PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
            PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
            _ => None,
        };
        (name, repository_url.to_string())
    } else if let Some(url) = pypirc.get(&opt.repository, "repository") {
        (Some(opt.repository.as_str()), url)
    } else if opt.repository == "pypi" {
        (Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
    } else if opt.repository == "testpypi" {
        (
            Some("testpypi"),
            PublishOpt::TEST_REPOSITORY_URL.to_string(),
        )
    } else {
        bail!(
            "Failed to get registry {} in .pypirc. \
            Note: Your index didn't start with http:// or https://, \
            which is required for non-pypirc indices.",
            opt.repository
        );
    };
    let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, &registry_url)?;
    let registry = Registry::new(username, password, registry_url);

    Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
/// Return the proxy URL from the conventional environment variables,
/// checking the HTTPS-specific names first, then HTTP, then ALL_PROXY.
fn http_proxy() -> Result<String, env::VarError> {
    const PROXY_VARS: [&str; 6] = [
        "HTTPS_PROXY",
        "https_proxy",
        "HTTP_PROXY",
        "http_proxy",
        "ALL_PROXY",
        "all_proxy",
    ];
    let mut proxy = Err(env::VarError::NotPresent);
    for &var in PROXY_VARS.iter() {
        proxy = proxy.or_else(|_| env::var(var));
    }
    proxy
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
    // maturin's own override wins, then the conventions honored by
    // Python requests and by curl.
    ["MATURIN_CA_BUNDLE", "REQUESTS_CA_BUNDLE", "CURL_CA_BUNDLE"]
        .iter()
        .find_map(|&var| env::var_os(var))
}
// Prefer rustls if both native-tls and rustls features are enabled
/// Build a ureq agent honoring the proxy environment variables and, when
/// configured, a custom CA bundle (native-tls backend).
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
    use std::sync::Arc;

    let mut builder = ureq::builder();
    if let Ok(proxy) = http_proxy() {
        let proxy = ureq::Proxy::new(proxy)?;
        builder = builder.proxy(proxy);
    };
    let mut tls_builder = native_tls::TlsConnector::builder();
    if let Some(ca_bundle) = tls_ca_bundle() {
        let mut reader = io::BufReader::new(File::open(ca_bundle)?);
        // The bundle may contain several PEM certificates; add them all.
        for cert in rustls_pemfile::certs(&mut reader)? {
            tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
        }
    }
    builder = builder.tls_connector(Arc::new(tls_builder.build()?));
    Ok(builder.build())
}
/// Build a ureq agent honoring the proxy environment variables and, when
/// configured, a custom CA bundle (rustls backend).
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
    use std::sync::Arc;

    let mut builder = ureq::builder();
    if let Ok(proxy) = http_proxy() {
        let proxy = ureq::Proxy::new(proxy)?;
        builder = builder.proxy(proxy);
    };
    if let Some(ca_bundle) = tls_ca_bundle() {
        let mut reader = io::BufReader::new(File::open(ca_bundle)?);
        let certs = rustls_pemfile::certs(&mut reader)?;
        let mut root_certs = rustls::RootCertStore::empty();
        // Unparsable certificates in the bundle are skipped, not fatal.
        root_certs.add_parsable_certificates(&certs);
        let client_config = rustls::ClientConfig::builder()
            .with_safe_defaults()
            .with_root_certificates(root_certs)
            .with_no_client_auth();
        Ok(builder.tls_config(Arc::new(client_config)).build())
    } else {
        Ok(builder.build())
    }
}
/// Build a ureq agent honoring the proxy environment variables.
/// No TLS backend is compiled in with this feature combination.
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
    let mut builder = ureq::builder();
    if let Ok(proxy) = http_proxy() {
        let proxy = ureq::Proxy::new(proxy)?;
        builder = builder.proxy(proxy);
    };
    Ok(builder.build())
}
/// Uploads a single wheel to the registry
///
/// Builds a multipart/form-data request following warehouse's legacy upload
/// API, authenticating with HTTP basic auth, and maps error responses onto
/// the `UploadError` variants (notably distinguishing "file already exists"
/// responses of several registry implementations).
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
    let hash_hex = hash_file(wheel_path)?;

    // Read the package metadata out of the wheel/sdist itself.
    let dist = python_pkginfo::Distribution::new(wheel_path)
        .map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
    let metadata = dist.metadata();

    // Mandatory fields of the legacy upload API.
    let mut api_metadata = vec![
        (":action", "file_upload".to_string()),
        ("sha256_digest", hash_hex),
        ("protocol_version", "1".to_string()),
        ("metadata_version", metadata.metadata_version.clone()),
        ("name", canonicalize_name(&metadata.name)),
        ("version", metadata.version.clone()),
        ("pyversion", dist.python_version().to_string()),
        ("filetype", dist.r#type().to_string()),
    ];

    // Optional single-valued fields: only sent when present.
    let mut add_option = |name, value: &Option<String>| {
        if let Some(some) = value.clone() {
            api_metadata.push((name, some));
        }
    };

    // https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
    add_option("summary", &metadata.summary);
    add_option("description", &metadata.description);
    add_option(
        "description_content_type",
        &metadata.description_content_type,
    );
    add_option("author", &metadata.author);
    add_option("author_email", &metadata.author_email);
    add_option("maintainer", &metadata.maintainer);
    add_option("maintainer_email", &metadata.maintainer_email);
    add_option("license", &metadata.license);
    add_option("keywords", &metadata.keywords);
    add_option("home_page", &metadata.home_page);
    add_option("download_url", &metadata.download_url);
    add_option("requires_python", &metadata.requires_python);
    // NOTE(review): "summary" was already added above -- duplicate form
    // entry; confirm it is harmless for the targeted registries.
    add_option("summary", &metadata.summary);

    if metadata.requires_python.is_none() {
        // GitLab PyPI repository API implementation requires this metadata field
        // and twine always includes it in the request, even when it's empty.
        api_metadata.push(("requires_python", "".to_string()));
    }

    // Multi-valued fields: one form entry per value.
    let mut add_vec = |name, values: &[String]| {
        for i in values {
            api_metadata.push((name, i.clone()));
        }
    };

    add_vec("classifiers", &metadata.classifiers);
    add_vec("platform", &metadata.platforms);
    add_vec("requires_dist", &metadata.requires_dist);
    add_vec("provides_dist", &metadata.provides_dist);
    add_vec("obsoletes_dist", &metadata.obsoletes_dist);
    add_vec("requires_external", &metadata.requires_external);
    add_vec("project_urls", &metadata.project_urls);

    let wheel = File::open(wheel_path)?;
    let wheel_name = wheel_path
        .file_name()
        .expect("Wheel path has a file name")
        .to_string_lossy();

    // Assemble the multipart form: all metadata as text parts plus the file.
    let mut form = Multipart::new();
    for (key, value) in api_metadata {
        form.add_text(key, value);
    }

    form.add_stream("content", &wheel, Some(wheel_name), None);
    let multipart_data = form.prepare().map_err(|e| e.error)?;

    // HTTP basic auth; for API tokens the username is "__token__".
    let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));

    let agent = http_agent()?;
    let response = agent
        .post(registry.url.as_str())
        .set(
            "Content-Type",
            &format!(
                "multipart/form-data; boundary={}",
                multipart_data.boundary()
            ),
        )
        .set(
            "User-Agent",
            &format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
        )
        .set("Authorization", &format!("Basic {encoded}"))
        .send(multipart_data);

    match response {
        Ok(_) => Ok(()),
        Err(ureq::Error::Status(status, response)) => {
            let err_text = response.into_string().unwrap_or_else(|e| {
                format!(
                    "The registry should return some text, \
                    even in case of an error, but didn't ({e})"
                )
            });
            debug!("Upload error response: {}", err_text);
            // Detect FileExistsError the way twine does
            // https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
            if status == 403 {
                if err_text.contains("overwrite artifact") {
                    // Artifactory (https://jfrog.com/artifactory/)
                    Err(UploadError::FileExistsError(err_text))
                } else {
                    Err(UploadError::AuthenticationError(err_text))
                }
            } else {
                let status_string = status.to_string();
                if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
                    // PyPI / TestPyPI
                    || (status == 400 && err_text.contains("already exists"))
                    // Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
                    || (status == 400 && err_text.contains("updating asset"))
                    // # Gitlab Enterprise Edition (https://about.gitlab.com)
                    || (status == 400 && err_text.contains("already been taken"))
                {
                    Err(UploadError::FileExistsError(err_text))
                } else {
                    Err(UploadError::StatusCodeError(status_string, err_text))
                }
            }
        }
        Err(err) => Err(UploadError::UreqError(err.into())),
    }
}
/// Handles authentication/keyring integration and retrying of the publish subcommand
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
let registry = complete_registry(publish)?;
eprintln!("🚀 Uploading {} packages", items.len());
for i in items {
let upload_result = upload(®istry, i);
match upload_result {
Ok(()) => (),
Err(UploadError::AuthenticationError(msg)) => {
let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
let title = title_re
.captures(&msg)
.and_then(|c| c.get(1))
.map(|m| m.as_str());
match title {
Some(title) => {
eprintln!("⛔ {title}");
}
None => eprintln!("⛔ Username and/or password are wrong"),
}
#[cfg(feature = "keyring")]
{
// Delete the wrong password from the keyring
let old_username = registry.username;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
.and_then(|keyring| keyring.delete_password())
{
Ok(()) => {
eprintln!("🔑 Removed wrong password from keyring")
} | | Err(keyring::Error::PlatformFailure(_)) => {}
Err(err) => {
eprintln!("⚠️ Warning: Failed to remove password from keyring: {err}")
}
}
}
bail!("Username and/or password are possibly wrong");
}
Err(err) => {
let filename = i.file_name().unwrap_or(i.as_os_str());
if let UploadError::FileExistsError(_) = err {
if publish.skip_existing {
eprintln!(
"⚠️ Note: Skipping {filename:?} because it appears to already exist"
);
continue;
}
}
let filesize = fs::metadata(i)
.map(|x| ByteSize(x.len()).to_string())
.unwrap_or_else(|e| format!("Failed to get the filesize of {:?}: {}", &i, e));
return Err(err).context(format!("💥 Failed to upload {filename:?} ({filesize})"));
}
}
}
eprintln!("✨ Packages uploaded successfully");
#[cfg(feature = "keyring")]
{
// We know the password is correct, so we can save it in the keyring
let username = registry.username.clone();
let password = registry.password;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &username)
.and_then(|keyring| keyring.set_password(&password))
{
Ok(())
| Err(keyring::Error::NoStorageAccess(_))
| Err(keyring::Error::PlatformFailure(_)) => {}
Err(err) => {
eprintln!("⚠️ Warning: Failed to store the password in the keyring: {err:?}");
}
}
}
Ok(())
} | Err(keyring::Error::NoEntry)
| Err(keyring::Error::NoStorageAccess(_)) | random_line_split |
upload.rs | //! The uploading logic was mostly reverse engineered; I wrote it down as
//! documentation at https://warehouse.readthedocs.io/api-reference/legacy/#upload-api
use crate::build_context::hash_file;
use anyhow::{bail, Context, Result};
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use bytesize::ByteSize;
use configparser::ini::Ini;
use fs_err as fs;
use fs_err::File;
use multipart::client::lazy::Multipart;
use regex::Regex;
use serde::Deserialize;
use std::collections::HashMap;
use std::env;
#[cfg(any(feature = "native-tls", feature = "rustls"))]
use std::ffi::OsString;
use std::io;
use std::path::{Path, PathBuf};
use std::time::Duration;
use thiserror::Error;
use tracing::debug;
/// An account with a registry, possibly incomplete
///
/// Every option can also be provided through the MATURIN_* environment
/// variable named on it; missing credentials are resolved later (pypirc,
/// OIDC, keyring or interactive prompt).
#[derive(Debug, clap::Parser)]
pub struct PublishOpt {
    /// The repository (package index) to upload the package to. Should be a section in the config file.
    ///
    /// Can also be set via MATURIN_REPOSITORY environment variable.
    #[arg(short = 'r', long, env = "MATURIN_REPOSITORY", default_value = "pypi")]
    repository: String,
    /// The URL of the registry where the wheels are uploaded to. This overrides --repository.
    ///
    /// Can also be set via MATURIN_REPOSITORY_URL environment variable.
    #[arg(long, env = "MATURIN_REPOSITORY_URL", overrides_with = "repository")]
    repository_url: Option<String>,
    /// Username for pypi or your custom registry.
    ///
    /// Can also be set via MATURIN_USERNAME environment variable.
    ///
    /// Set MATURIN_PYPI_TOKEN variable to use token-based authentication instead
    #[arg(short, long, env = "MATURIN_USERNAME")]
    username: Option<String>,
    /// Password for pypi or your custom registry.
    ///
    /// Can also be set via MATURIN_PASSWORD environment variable.
    #[arg(short, long, env = "MATURIN_PASSWORD", hide_env_values = true)]
    password: Option<String>,
    /// Continue uploading files if one already exists.
    /// (Only valid when uploading to PyPI. Other implementations may not support this.)
    #[arg(long)]
    skip_existing: bool,
    /// Do not interactively prompt for username/password if the required credentials are missing.
    ///
    /// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
    #[arg(long, env = "MATURIN_NON_INTERACTIVE")]
    non_interactive: bool,
}
impl PublishOpt {
const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";
/// Set to non interactive mode if we're running on CI
pub fn non_interactive_on_ci(&mut self) {
if !self.non_interactive && env::var("CI").map(|v| v == "true").unwrap_or_default() {
eprintln!("🎛️ Running in non-interactive mode on CI");
self.non_interactive = true;
}
}
}
/// Error type for different types of errors that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
/// Any ureq error
#[error("Http error")]
UreqError(#[source] Box<ureq::Error>),
/// The registry returned a "403 Forbidden"
#[error("Username or password are incorrect")]
AuthenticationError(String),
/// Reading the wheel failed
#[error("IO Error")]
IoError(#[source] io::Error),
/// The registry returned something else than 200
#[error("Failed to upload the wheel with status {0}: {1}")]
StatusCodeError(String, String),
/// File already exists
#[error("File already exists: {0}")]
FileExistsError(String),
/// Read package metadata error
#[error("Could not read the metadata from the package at {0}")]
PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
/// TLS error
#[cfg(feature = "native-tls")]
#[error("TLS Error")]
TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
fn from(error: ureq::Error) -> Self {
UploadError::UreqError(Box::new(error))
}
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
Registry {
username,
password,
url,
}
}
}
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
| config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
audience: String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in .pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, ®istry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS_CA_BUNDLE"))
.or_else(|| env::var_os("CURL_CA_BUNDLE"))
}
// Prefer rustls if both native-tls and rustls features are enabled
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
let mut tls_builder = native_tls::TlsConnector::builder();
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
for cert in rustls_pemfile::certs(&mut reader)? {
tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
}
}
builder = builder.tls_connector(Arc::new(tls_builder.build()?));
Ok(builder.build())
}
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
let certs = rustls_pemfile::certs(&mut reader)?;
let mut root_certs = rustls::RootCertStore::empty();
root_certs.add_parsable_certificates(&certs);
let client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
Ok(builder.tls_config(Arc::new(client_config)).build())
} else {
Ok(builder.build())
}
}
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
Ok(builder.build())
}
/// Uploads a single wheel to the registry
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
let hash_hex = hash_file(wheel_path)?;
let dist = python_pkginfo::Distribution::new(wheel_path)
.map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
let metadata = dist.metadata();
let mut api_metadata = vec![
(":action", "file_upload".to_string()),
("sha256_digest", hash_hex),
("protocol_version", "1".to_string()),
("metadata_version", metadata.metadata_version.clone()),
("name", canonicalize_name(&metadata.name)),
("version", metadata.version.clone()),
("pyversion", dist.python_version().to_string()),
("filetype", dist.r#type().to_string()),
];
let mut add_option = |name, value: &Option<String>| {
if let Some(some) = value.clone() {
api_metadata.push((name, some));
}
};
// https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
add_option("summary", &metadata.summary);
add_option("description", &metadata.description);
add_option(
"description_content_type",
&metadata.description_content_type,
);
add_option("author", &metadata.author);
add_option("author_email", &metadata.author_email);
add_option("maintainer", &metadata.maintainer);
add_option("maintainer_email", &metadata.maintainer_email);
add_option("license", &metadata.license);
add_option("keywords", &metadata.keywords);
add_option("home_page", &metadata.home_page);
add_option("download_url", &metadata.download_url);
add_option("requires_python", &metadata.requires_python);
add_option("summary", &metadata.summary);
if metadata.requires_python.is_none() {
// GitLab PyPI repository API implementation requires this metadata field
// and twine always includes it in the request, even when it's empty.
api_metadata.push(("requires_python", "".to_string()));
}
let mut add_vec = |name, values: &[String]| {
for i in values {
api_metadata.push((name, i.clone()));
}
};
add_vec("classifiers", &metadata.classifiers);
add_vec("platform", &metadata.platforms);
add_vec("requires_dist", &metadata.requires_dist);
add_vec("provides_dist", &metadata.provides_dist);
add_vec("obsoletes_dist", &metadata.obsoletes_dist);
add_vec("requires_external", &metadata.requires_external);
add_vec("project_urls", &metadata.project_urls);
let wheel = File::open(wheel_path)?;
let wheel_name = wheel_path
.file_name()
.expect("Wheel path has a file name")
.to_string_lossy();
let mut form = Multipart::new();
for (key, value) in api_metadata {
form.add_text(key, value);
}
form.add_stream("content", &wheel, Some(wheel_name), None);
let multipart_data = form.prepare().map_err(|e| e.error)?;
let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
let agent = http_agent()?;
let response = agent
.post(registry.url.as_str())
.set(
"Content-Type",
&format!(
"multipart/form-data; boundary={}",
multipart_data.boundary()
),
)
.set(
"User-Agent",
&format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
)
.set("Authorization", &format!("Basic {encoded}"))
.send(multipart_data);
match response {
Ok(_) => Ok(()),
Err(ureq::Error::Status(status, response)) => {
let err_text = response.into_string().unwrap_or_else(|e| {
format!(
"The registry should return some text, \
even in case of an error, but didn't ({e})"
)
});
debug!("Upload error response: {}", err_text);
// Detect FileExistsError the way twine does
// https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
if status == 403 {
if err_text.contains("overwrite artifact") {
// Artifactory (https://jfrog.com/artifactory/)
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::AuthenticationError(err_text))
}
} else {
let status_string = status.to_string();
if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
// PyPI / TestPyPI
|| (status == 400 && err_text.contains("already exists"))
// Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
|| (status == 400 && err_text.contains("updating asset"))
// # Gitlab Enterprise Edition (https://about.gitlab.com)
|| (status == 400 && err_text.contains("already been taken"))
{
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::StatusCodeError(status_string, err_text))
}
}
}
Err(err) => Err(UploadError::UreqError(err.into())),
}
}
/// Handles authentication/keyring integration and retrying of the publish subcommand
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
let registry = complete_registry(publish)?;
eprintln!("🚀 Uploading {} packages", items.len());
for i in items {
let upload_result = upload(®istry, i);
match upload_result {
Ok(()) => (),
Err(UploadError::AuthenticationError(msg)) => {
let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
let title = title_re
.captures(&msg)
.and_then(|c| c.get(1))
.map(|m| m.as_str());
match title {
Some(title) => {
eprintln!("⛔ {title}");
}
None => eprintln!("⛔ Username and/or password are wrong"),
}
#[cfg(feature = "keyring")]
{
// Delete the wrong password from the keyring
let old_username = registry.username;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
.and_then(|keyring| keyring.delete_password())
{
Ok(()) => {
eprintln!("🔑 Removed wrong password from keyring")
}
Err(keyring::Error::NoEntry)
| Err(keyring::Error::NoStorageAccess(_))
| Err(keyring::Error::PlatformFailure(_)) => {}
Err(err) => {
eprintln!("⚠️ Warning: Failed to remove password from keyring: {err}")
}
}
}
bail!("Username and/or password are possibly wrong");
}
Err(err) => {
let filename = i.file_name().unwrap_or(i.as_os_str());
if let UploadError::FileExistsError(_) = err {
if publish.skip_existing {
eprintln!(
"⚠️ Note: Skipping {filename:?} because it appears to already exist"
);
continue;
}
}
let filesize = fs::metadata(i)
.map(|x| ByteSize(x.len()).to_string())
.unwrap_or_else(|e| format!("Failed to get the filesize of {:?}: {}", &i, e));
return Err(err).context(format!("💥 Failed to upload {filename:?} ({filesize})"));
}
}
}
eprintln!("✨ Packages uploaded successfully");
#[cfg(feature = "keyring")]
{
// We know the password is correct, so we can save it in the keyring
let username = registry.username.clone();
let password = registry.password;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &username)
.and_then(|keyring| keyring.set_password(&password))
{
Ok(())
| Err(keyring::Error::NoStorageAccess(_))
| Err(keyring::Error::PlatformFailure(_)) => {}
Err(err) => {
eprintln!("⚠️ Warning: Failed to store the password in the keyring: {err:?}");
}
}
}
Ok(())
}
| config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
| conditional_block |
upload.rs | //! The uploading logic was mostly reverse engineered; I wrote it down as
//! documentation at https://warehouse.readthedocs.io/api-reference/legacy/#upload-api
use crate::build_context::hash_file;
use anyhow::{bail, Context, Result};
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use bytesize::ByteSize;
use configparser::ini::Ini;
use fs_err as fs;
use fs_err::File;
use multipart::client::lazy::Multipart;
use regex::Regex;
use serde::Deserialize;
use std::collections::HashMap;
use std::env;
#[cfg(any(feature = "native-tls", feature = "rustls"))]
use std::ffi::OsString;
use std::io;
use std::path::{Path, PathBuf};
use std::time::Duration;
use thiserror::Error;
use tracing::debug;
/// An account with a registry, possibly incomplete
#[derive(Debug, clap::Parser)]
pub struct PublishOpt {
/// The repository (package index) to upload the package to. Should be a section in the config file.
///
/// Can also be set via MATURIN_REPOSITORY environment variable.
#[arg(short = 'r', long, env = "MATURIN_REPOSITORY", default_value = "pypi")]
repository: String,
/// The URL of the registry where the wheels are uploaded to. This overrides --repository.
///
/// Can also be set via MATURIN_REPOSITORY_URL environment variable.
#[arg(long, env = "MATURIN_REPOSITORY_URL", overrides_with = "repository")]
repository_url: Option<String>,
/// Username for pypi or your custom registry.
///
/// Can also be set via MATURIN_USERNAME environment variable.
///
/// Set MATURIN_PYPI_TOKEN variable to use token-based authentication instead
#[arg(short, long, env = "MATURIN_USERNAME")]
username: Option<String>,
/// Password for pypi or your custom registry.
///
/// Can also be set via MATURIN_PASSWORD environment variable.
#[arg(short, long, env = "MATURIN_PASSWORD", hide_env_values = true)]
password: Option<String>,
/// Continue uploading files if one already exists.
/// (Only valid when uploading to PyPI. Other implementations may not support this.)
#[arg(long)]
skip_existing: bool,
/// Do not interactively prompt for username/password if the required credentials are missing.
///
/// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
#[arg(long, env = "MATURIN_NON_INTERACTIVE")]
non_interactive: bool,
}
impl PublishOpt {
const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";
/// Set to non interactive mode if we're running on CI
pub fn non_interactive_on_ci(&mut self) {
if !self.non_interactive && env::var("CI").map(|v| v == "true").unwrap_or_default() {
eprintln!("🎛️ Running in non-interactive mode on CI");
self.non_interactive = true;
}
}
}
/// Error type for different types of errors that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
/// Any ureq error
#[error("Http error")]
UreqError(#[source] Box<ureq::Error>),
/// The registry returned a "403 Forbidden"
#[error("Username or password are incorrect")]
AuthenticationError(String),
/// Reading the wheel failed
#[error("IO Error")]
IoError(#[source] io::Error),
/// The registry returned something else than 200
#[error("Failed to upload the wheel with status {0}: {1}")]
StatusCodeError(String, String),
/// File already exists
#[error("File already exists: {0}")]
FileExistsError(String),
/// Read package metadata error
#[error("Could not read the metadata from the package at {0}")]
PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
/// TLS error
#[cfg(feature = "native-tls")]
#[error("TLS Error")]
TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
fn from(error: ureq::Error) -> Self {
UploadError::UreqError(Box::new(error))
}
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
| // Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
audience: String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in .pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, ®istry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS_CA_BUNDLE"))
.or_else(|| env::var_os("CURL_CA_BUNDLE"))
}
// Prefer rustls if both native-tls and rustls features are enabled
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
let mut tls_builder = native_tls::TlsConnector::builder();
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
for cert in rustls_pemfile::certs(&mut reader)? {
tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
}
}
builder = builder.tls_connector(Arc::new(tls_builder.build()?));
Ok(builder.build())
}
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
let certs = rustls_pemfile::certs(&mut reader)?;
let mut root_certs = rustls::RootCertStore::empty();
root_certs.add_parsable_certificates(&certs);
let client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
Ok(builder.tls_config(Arc::new(client_config)).build())
} else {
Ok(builder.build())
}
}
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
Ok(builder.build())
}
/// Uploads a single wheel to the registry
///
/// Builds the twine-style `multipart/form-data` payload (metadata fields plus
/// the wheel file) and POSTs it with HTTP basic auth, then classifies error
/// responses so callers can tell auth failures from duplicate uploads.
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
    let hash_hex = hash_file(wheel_path)?;
    let dist = python_pkginfo::Distribution::new(wheel_path)
        .map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
    let metadata = dist.metadata();
    let mut api_metadata = vec![
        (":action", "file_upload".to_string()),
        ("sha256_digest", hash_hex),
        ("protocol_version", "1".to_string()),
        ("metadata_version", metadata.metadata_version.clone()),
        ("name", canonicalize_name(&metadata.name)),
        ("version", metadata.version.clone()),
        ("pyversion", dist.python_version().to_string()),
        ("filetype", dist.r#type().to_string()),
    ];
    // Pushes an optional metadata field only when it is present.
    let mut add_option = |name, value: &Option<String>| {
        if let Some(some) = value.clone() {
            api_metadata.push((name, some));
        }
    };
    // https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
    add_option("summary", &metadata.summary);
    add_option("description", &metadata.description);
    add_option(
        "description_content_type",
        &metadata.description_content_type,
    );
    add_option("author", &metadata.author);
    add_option("author_email", &metadata.author_email);
    add_option("maintainer", &metadata.maintainer);
    add_option("maintainer_email", &metadata.maintainer_email);
    add_option("license", &metadata.license);
    add_option("keywords", &metadata.keywords);
    add_option("home_page", &metadata.home_page);
    add_option("download_url", &metadata.download_url);
    add_option("requires_python", &metadata.requires_python);
    // Fixed: "summary" was previously pushed a second time here, producing a
    // duplicate form field; the duplicate has been removed.
    if metadata.requires_python.is_none() {
        // GitLab PyPI repository API implementation requires this metadata field
        // and twine always includes it in the request, even when it's empty.
        api_metadata.push(("requires_python", "".to_string()));
    }
    // Pushes one form field per element for repeatable metadata fields.
    let mut add_vec = |name, values: &[String]| {
        for i in values {
            api_metadata.push((name, i.clone()));
        }
    };
    add_vec("classifiers", &metadata.classifiers);
    add_vec("platform", &metadata.platforms);
    add_vec("requires_dist", &metadata.requires_dist);
    add_vec("provides_dist", &metadata.provides_dist);
    add_vec("obsoletes_dist", &metadata.obsoletes_dist);
    add_vec("requires_external", &metadata.requires_external);
    add_vec("project_urls", &metadata.project_urls);
    let wheel = File::open(wheel_path)?;
    let wheel_name = wheel_path
        .file_name()
        .expect("Wheel path has a file name")
        .to_string_lossy();
    let mut form = Multipart::new();
    for (key, value) in api_metadata {
        form.add_text(key, value);
    }
    form.add_stream("content", &wheel, Some(wheel_name), None);
    let multipart_data = form.prepare().map_err(|e| e.error)?;
    let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
    let agent = http_agent()?;
    let response = agent
        .post(registry.url.as_str())
        .set(
            "Content-Type",
            &format!(
                "multipart/form-data; boundary={}",
                multipart_data.boundary()
            ),
        )
        .set(
            "User-Agent",
            &format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
        )
        .set("Authorization", &format!("Basic {encoded}"))
        .send(multipart_data);
    match response {
        Ok(_) => Ok(()),
        Err(ureq::Error::Status(status, response)) => {
            let err_text = response.into_string().unwrap_or_else(|e| {
                format!(
                    "The registry should return some text, \
                    even in case of an error, but didn't ({e})"
                )
            });
            debug!("Upload error response: {}", err_text);
            // Detect FileExistsError the way twine does
            // https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
            if status == 403 {
                if err_text.contains("overwrite artifact") {
                    // Artifactory (https://jfrog.com/artifactory/)
                    Err(UploadError::FileExistsError(err_text))
                } else {
                    Err(UploadError::AuthenticationError(err_text))
                }
            } else {
                let status_string = status.to_string();
                if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
                    // PyPI / TestPyPI
                    || (status == 400 && err_text.contains("already exists"))
                    // Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
                    || (status == 400 && err_text.contains("updating asset"))
                    // # Gitlab Enterprise Edition (https://about.gitlab.com)
                    || (status == 400 && err_text.contains("already been taken"))
                {
                    Err(UploadError::FileExistsError(err_text))
                } else {
                    Err(UploadError::StatusCodeError(status_string, err_text))
                }
            }
        }
        Err(err) => Err(UploadError::UreqError(err.into())),
    }
}
/// Handles authentication/keyring integration and retrying of the publish subcommand
///
/// Uploads each wheel in turn; aborts on authentication errors, optionally
/// skips already-uploaded files (`--skip-existing`), and syncs the keyring
/// (removes a rejected password, stores an accepted one) when that feature is on.
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
    let registry = complete_registry(publish)?;
    eprintln!("🚀 Uploading {} packages", items.len());
    for i in items {
        // Fixed: `&registry` had been corrupted into `®istry` (mojibake).
        let upload_result = upload(&registry, i);
        match upload_result {
            Ok(()) => (),
            Err(UploadError::AuthenticationError(msg)) => {
                // Registries often answer with an HTML error page; surface its <title>.
                let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
                let title = title_re
                    .captures(&msg)
                    .and_then(|c| c.get(1))
                    .map(|m| m.as_str());
                match title {
                    Some(title) => {
                        eprintln!("⛔ {title}");
                    }
                    None => eprintln!("⛔ Username and/or password are wrong"),
                }
                #[cfg(feature = "keyring")]
                {
                    // Delete the wrong password from the keyring
                    let old_username = registry.username;
                    match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
                        .and_then(|keyring| keyring.delete_password())
                    {
                        Ok(()) => {
                            eprintln!("🔑 Removed wrong password from keyring")
                        }
                        Err(keyring::Error::NoEntry)
                        | Err(keyring::Error::NoStorageAccess(_))
                        | Err(keyring::Error::PlatformFailure(_)) => {}
                        Err(err) => {
                            eprintln!("⚠️ Warning: Failed to remove password from keyring: {err}")
                        }
                    }
                }
                bail!("Username and/or password are possibly wrong");
            }
            Err(err) => {
                let filename = i.file_name().unwrap_or(i.as_os_str());
                if let UploadError::FileExistsError(_) = err {
                    if publish.skip_existing {
                        eprintln!(
                            "⚠️ Note: Skipping {filename:?} because it appears to already exist"
                        );
                        continue;
                    }
                }
                let filesize = fs::metadata(i)
                    .map(|x| ByteSize(x.len()).to_string())
                    .unwrap_or_else(|e| format!("Failed to get the filesize of {:?}: {}", &i, e));
                return Err(err).context(format!("💥 Failed to upload {filename:?} ({filesize})"));
            }
        }
    }
    eprintln!("✨ Packages uploaded successfully");
    #[cfg(feature = "keyring")]
    {
        // We know the password is correct, so we can save it in the keyring
        let username = registry.username.clone();
        let password = registry.password;
        match keyring::Entry::new(env!("CARGO_PKG_NAME"), &username)
            .and_then(|keyring| keyring.set_password(&password))
        {
            Ok(())
            | Err(keyring::Error::NoStorageAccess(_))
            | Err(keyring::Error::PlatformFailure(_)) => {}
            Err(err) => {
                eprintln!("⚠️ Warning: Failed to store the password in the keyring: {err:?}");
            }
        }
    }
    Ok(())
}
| Registry {
username,
password,
url,
}
}
}
/ | identifier_body |
upload.rs | //! The uploading logic was mostly reverse engineered; I wrote it down as
//! documentation at https://warehouse.readthedocs.io/api-reference/legacy/#upload-api
use crate::build_context::hash_file;
use anyhow::{bail, Context, Result};
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use bytesize::ByteSize;
use configparser::ini::Ini;
use fs_err as fs;
use fs_err::File;
use multipart::client::lazy::Multipart;
use regex::Regex;
use serde::Deserialize;
use std::collections::HashMap;
use std::env;
#[cfg(any(feature = "native-tls", feature = "rustls"))]
use std::ffi::OsString;
use std::io;
use std::path::{Path, PathBuf};
use std::time::Duration;
use thiserror::Error;
use tracing::debug;
/// An account with a registry, possibly incomplete
// Parsed by clap; every option can also come from a MATURIN_* environment
// variable, so fields are Option/bool and completed later in complete_registry.
#[derive(Debug, clap::Parser)]
pub struct PublishOpt {
    /// The repository (package index) to upload the package to. Should be a section in the config file.
    ///
    /// Can also be set via MATURIN_REPOSITORY environment variable.
    #[arg(short = 'r', long, env = "MATURIN_REPOSITORY", default_value = "pypi")]
    repository: String,
    /// The URL of the registry where the wheels are uploaded to. This overrides --repository.
    ///
    /// Can also be set via MATURIN_REPOSITORY_URL environment variable.
    #[arg(long, env = "MATURIN_REPOSITORY_URL", overrides_with = "repository")]
    repository_url: Option<String>,
    /// Username for pypi or your custom registry.
    ///
    /// Can also be set via MATURIN_USERNAME environment variable.
    ///
    /// Set MATURIN_PYPI_TOKEN variable to use token-based authentication instead
    #[arg(short, long, env = "MATURIN_USERNAME")]
    username: Option<String>,
    /// Password for pypi or your custom registry.
    ///
    /// Can also be set via MATURIN_PASSWORD environment variable.
    #[arg(short, long, env = "MATURIN_PASSWORD", hide_env_values = true)]
    password: Option<String>,
    /// Continue uploading files if one already exists.
    /// (Only valid when uploading to PyPI. Other implementations may not support this.)
    #[arg(long)]
    skip_existing: bool,
    /// Do not interactively prompt for username/password if the required credentials are missing.
    ///
    /// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
    #[arg(long, env = "MATURIN_NON_INTERACTIVE")]
    non_interactive: bool,
}
impl PublishOpt {
    const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
    const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";
    /// Switches to non-interactive mode when running under CI (`CI=true`),
    /// unless non-interactive was already requested explicitly.
    pub fn non_interactive_on_ci(&mut self) {
        let on_ci = matches!(env::var("CI").as_deref(), Ok("true"));
        if on_ci && !self.non_interactive {
            eprintln!("🎛️ Running in non-interactive mode on CI");
            self.non_interactive = true;
        }
    }
}
/// Error type for different types of errors that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
    /// Any ureq error
    // Boxed so Result<_, UploadError> stays small (see clippy::result_large_err).
    #[error("Http error")]
    UreqError(#[source] Box<ureq::Error>),
    /// The registry returned a "403 Forbidden"
    #[error("Username or password are incorrect")]
    AuthenticationError(String),
    /// Reading the wheel failed
    #[error("IO Error")]
    IoError(#[source] io::Error),
    /// The registry returned something else than 200
    #[error("Failed to upload the wheel with status {0}: {1}")]
    StatusCodeError(String, String),
    /// File already exists
    #[error("File already exists: {0}")]
    FileExistsError(String),
    /// Read package metadata error
    #[error("Could not read the metadata from the package at {0}")]
    PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
    /// TLS error
    #[cfg(feature = "native-tls")]
    #[error("TLS Error")]
    TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
    /// Boxes the (large) ureq error to keep `UploadError` itself small.
    fn from(error: ureq::Error) -> Self {
        Self::UreqError(Box::new(error))
    }
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
    /// Wraps TLS setup failures from the native-tls backend.
    fn from(error: native_tls::Error) -> Self {
        Self::TlsError(error)
    }
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
    /// The username
    // Set to "__token__" when authenticating via API token or trusted publishing.
    pub username: String,
    /// The password
    pub password: String,
    /// The url endpoint for legacy uploading
    pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
Registry {
username,
password,
url,
}
}
}
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
    #[cfg(feature = "keyring")]
    {
        // Try the OS keyring first; any lookup failure silently falls through
        // to the interactive prompt below.
        let service = env!("CARGO_PKG_NAME");
        let keyring = keyring::Entry::new(service, _username);
        if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
            return password;
        };
    }
    dialoguer::Password::new()
        .with_prompt("Please enter your password")
        .interact()
        .unwrap_or_else(|_| {
            // So we need this fallback for pycharm on windows
            // (dialoguer can fail when stdin is not a real terminal)
            let mut password = String::new();
            io::stdin()
                .read_line(&mut password)
                .expect("Failed to read line");
            password.trim().to_string()
        })
}
/// Prompts for a username on stdin (used when none was supplied via CLI/env/pypirc).
fn get_username() -> String {
    eprintln!("Please enter your username:");
    let mut buffer = String::new();
    io::stdin().read_line(&mut buffer).unwrap();
    buffer.trim().to_string()
}
/// Loads `~/.pypirc` if it exists; otherwise returns an empty config.
fn load_pypirc() -> Ini {
    let mut config = Ini::new();
    let pypirc_path = dirs::home_dir().map(|mut home| {
        home.push(".pypirc");
        home
    });
    if let Some(path) = pypirc_path {
        if let Ok(contents) = fs::read_to_string(path.as_path()) {
            // Parse errors are deliberately ignored; a broken pypirc just
            // behaves like an empty one.
            let _ = config.read(contents);
        }
    }
    config
}
/// Reads username + password from the named pypirc section; `None` unless both are present.
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
    let username = config.get(registry_name, "username")?;
    let password = config.get(registry_name, "password")?;
    Some((username, password))
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. OIDC token exchange (GitHub Actions "trusted publishing")
/// 3. `.pypirc` config file
/// 4. maturin command arguments
/// 5. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 6. the password keyring
/// 7. interactive prompt
fn resolve_pypi_cred(
    opt: &PublishOpt,
    config: &Ini,
    registry_name: Option<&str>,
    registry_url: &str,
) -> Result<(String, String)> {
    // API token from environment variable takes priority
    if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
        return Ok(("__token__".to_string(), token));
    }
    // Try to get a token via OIDC exchange
    match resolve_pypi_token_via_oidc(registry_url) {
        Ok(Some(token)) => {
            eprintln!("🔐 Using trusted publisher for upload");
            return Ok(("__token__".to_string(), token));
        }
        // None means OIDC is unavailable; an error is warned about but not fatal.
        Ok(None) => {}
        Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
    }
    if let Some((username, password)) =
        registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
    {
        eprintln!("🔐 Using credential in pypirc for upload");
        return Ok((username, password));
    }
    // fallback to username and password
    if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
        bail!("Credentials not found and non-interactive mode is enabled");
    }
    let username = opt.username.clone().unwrap_or_else(get_username);
    let password = opt
        .password
        .clone()
        .unwrap_or_else(|| get_password(&username));
    Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceRes | : String,
}
/// Response payload of the OIDC ID-token request made to
/// `ACTIONS_ID_TOKEN_REQUEST_URL` (GitHub Actions endpoint).
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
    value: String,
}
/// Response payload of the registry's `_/oidc/github/mint-token` endpoint.
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
    token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
///
/// Resolves the registry name/URL from `--repository-url`, `.pypirc`, or the
/// built-in pypi/testpypi defaults, then resolves credentials for it.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
    // load creds from pypirc if found
    let pypirc = load_pypirc();
    let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
    {
        // Map well-known URLs back to their pypirc section names so
        // credentials can still be looked up in the config file.
        let name = match repository_url {
            PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
            PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
            _ => None,
        };
        (name, repository_url.to_string())
    } else if let Some(url) = pypirc.get(&opt.repository, "repository") {
        (Some(opt.repository.as_str()), url)
    } else if opt.repository == "pypi" {
        (Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
    } else if opt.repository == "testpypi" {
        (
            Some("testpypi"),
            PublishOpt::TEST_REPOSITORY_URL.to_string(),
        )
    } else {
        bail!(
            "Failed to get registry {} in .pypirc. \
            Note: Your index didn't start with http:// or https://, \
            which is required for non-pypirc indices.",
            opt.repository
        );
    };
    // Fixed: `&registry_url` had been corrupted into `®istry_url` (mojibake).
    let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, &registry_url)?;
    let registry = Registry::new(username, password, registry_url);
    Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
/// Returns the proxy URL from the first set proxy environment variable,
/// checked in the conventional order (HTTPS before HTTP before ALL, with the
/// upper-case spelling of each preferred).
fn http_proxy() -> Result<String, env::VarError> {
    let candidates = [
        "HTTPS_PROXY",
        "https_proxy",
        "HTTP_PROXY",
        "http_proxy",
        "ALL_PROXY",
        "all_proxy",
    ];
    let mut result = env::var(candidates[0]);
    for name in &candidates[1..] {
        if result.is_ok() {
            break;
        }
        result = env::var(name);
    }
    result
}
/// Honors the same custom CA-bundle overrides as pip/requests/curl, in
/// priority order.
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
    ["MATURIN_CA_BUNDLE", "REQUESTS_CA_BUNDLE", "CURL_CA_BUNDLE"]
        .iter()
        .find_map(|name| env::var_os(name))
}
// Prefer rustls if both native-tls and rustls features are enabled
/// Builds a ureq agent backed by native-tls, honoring proxy env vars and an
/// optional custom CA bundle added on top of the system roots.
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
    use std::sync::Arc;
    let mut builder = ureq::builder();
    if let Ok(proxy) = http_proxy() {
        let proxy = ureq::Proxy::new(proxy)?;
        builder = builder.proxy(proxy);
    };
    let mut tls_builder = native_tls::TlsConnector::builder();
    if let Some(ca_bundle) = tls_ca_bundle() {
        let mut reader = io::BufReader::new(File::open(ca_bundle)?);
        // rustls_pemfile is used only to split the bundle into individual certs.
        // NOTE(review): rustls_pemfile::certs yields DER-encoded certificates,
        // yet they are passed to Certificate::from_pem — confirm this works.
        for cert in rustls_pemfile::certs(&mut reader)? {
            tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
        }
    }
    builder = builder.tls_connector(Arc::new(tls_builder.build()?));
    Ok(builder.build())
}
/// Builds a ureq agent backed by rustls; a custom CA bundle (when one of the
/// bundle env vars is set) replaces the default root store entirely.
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
    use std::sync::Arc;
    let mut builder = ureq::builder();
    if let Ok(proxy) = http_proxy() {
        let proxy = ureq::Proxy::new(proxy)?;
        builder = builder.proxy(proxy);
    };
    if let Some(ca_bundle) = tls_ca_bundle() {
        let mut reader = io::BufReader::new(File::open(ca_bundle)?);
        let certs = rustls_pemfile::certs(&mut reader)?;
        let mut root_certs = rustls::RootCertStore::empty();
        // add_parsable_certificates skips certificates it cannot parse
        // instead of failing the whole agent construction.
        root_certs.add_parsable_certificates(&certs);
        let client_config = rustls::ClientConfig::builder()
            .with_safe_defaults()
            .with_root_certificates(root_certs)
            .with_no_client_auth();
        Ok(builder.tls_config(Arc::new(client_config)).build())
    } else {
        Ok(builder.build())
    }
}
/// Builds a plain (no TLS backend compiled in) ureq agent, honoring the
/// conventional proxy environment variables.
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
    let mut builder = ureq::builder();
    if let Ok(proxy_url) = http_proxy() {
        builder = builder.proxy(ureq::Proxy::new(proxy_url)?);
    }
    Ok(builder.build())
}
/// Uploads a single wheel to the registry
///
/// Builds the twine-style `multipart/form-data` payload (metadata fields plus
/// the wheel file) and POSTs it with HTTP basic auth, then classifies error
/// responses so callers can tell auth failures from duplicate uploads.
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
    let hash_hex = hash_file(wheel_path)?;
    let dist = python_pkginfo::Distribution::new(wheel_path)
        .map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
    let metadata = dist.metadata();
    let mut api_metadata = vec![
        (":action", "file_upload".to_string()),
        ("sha256_digest", hash_hex),
        ("protocol_version", "1".to_string()),
        ("metadata_version", metadata.metadata_version.clone()),
        ("name", canonicalize_name(&metadata.name)),
        ("version", metadata.version.clone()),
        ("pyversion", dist.python_version().to_string()),
        ("filetype", dist.r#type().to_string()),
    ];
    // Pushes an optional metadata field only when it is present.
    let mut add_option = |name, value: &Option<String>| {
        if let Some(some) = value.clone() {
            api_metadata.push((name, some));
        }
    };
    // https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
    add_option("summary", &metadata.summary);
    add_option("description", &metadata.description);
    add_option(
        "description_content_type",
        &metadata.description_content_type,
    );
    add_option("author", &metadata.author);
    add_option("author_email", &metadata.author_email);
    add_option("maintainer", &metadata.maintainer);
    add_option("maintainer_email", &metadata.maintainer_email);
    add_option("license", &metadata.license);
    add_option("keywords", &metadata.keywords);
    add_option("home_page", &metadata.home_page);
    add_option("download_url", &metadata.download_url);
    add_option("requires_python", &metadata.requires_python);
    // Fixed: "summary" was previously pushed a second time here, producing a
    // duplicate form field; the duplicate has been removed.
    if metadata.requires_python.is_none() {
        // GitLab PyPI repository API implementation requires this metadata field
        // and twine always includes it in the request, even when it's empty.
        api_metadata.push(("requires_python", "".to_string()));
    }
    // Pushes one form field per element for repeatable metadata fields.
    let mut add_vec = |name, values: &[String]| {
        for i in values {
            api_metadata.push((name, i.clone()));
        }
    };
    add_vec("classifiers", &metadata.classifiers);
    add_vec("platform", &metadata.platforms);
    add_vec("requires_dist", &metadata.requires_dist);
    add_vec("provides_dist", &metadata.provides_dist);
    add_vec("obsoletes_dist", &metadata.obsoletes_dist);
    add_vec("requires_external", &metadata.requires_external);
    add_vec("project_urls", &metadata.project_urls);
    let wheel = File::open(wheel_path)?;
    let wheel_name = wheel_path
        .file_name()
        .expect("Wheel path has a file name")
        .to_string_lossy();
    let mut form = Multipart::new();
    for (key, value) in api_metadata {
        form.add_text(key, value);
    }
    form.add_stream("content", &wheel, Some(wheel_name), None);
    let multipart_data = form.prepare().map_err(|e| e.error)?;
    let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
    let agent = http_agent()?;
    let response = agent
        .post(registry.url.as_str())
        .set(
            "Content-Type",
            &format!(
                "multipart/form-data; boundary={}",
                multipart_data.boundary()
            ),
        )
        .set(
            "User-Agent",
            &format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
        )
        .set("Authorization", &format!("Basic {encoded}"))
        .send(multipart_data);
    match response {
        Ok(_) => Ok(()),
        Err(ureq::Error::Status(status, response)) => {
            let err_text = response.into_string().unwrap_or_else(|e| {
                format!(
                    "The registry should return some text, \
                    even in case of an error, but didn't ({e})"
                )
            });
            debug!("Upload error response: {}", err_text);
            // Detect FileExistsError the way twine does
            // https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
            if status == 403 {
                if err_text.contains("overwrite artifact") {
                    // Artifactory (https://jfrog.com/artifactory/)
                    Err(UploadError::FileExistsError(err_text))
                } else {
                    Err(UploadError::AuthenticationError(err_text))
                }
            } else {
                let status_string = status.to_string();
                if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
                    // PyPI / TestPyPI
                    || (status == 400 && err_text.contains("already exists"))
                    // Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
                    || (status == 400 && err_text.contains("updating asset"))
                    // # Gitlab Enterprise Edition (https://about.gitlab.com)
                    || (status == 400 && err_text.contains("already been taken"))
                {
                    Err(UploadError::FileExistsError(err_text))
                } else {
                    Err(UploadError::StatusCodeError(status_string, err_text))
                }
            }
        }
        Err(err) => Err(UploadError::UreqError(err.into())),
    }
}
/// Handles authentication/keyring integration and retrying of the publish subcommand
///
/// Uploads each wheel in turn; aborts on authentication errors, optionally
/// skips already-uploaded files (`--skip-existing`), and syncs the keyring
/// (removes a rejected password, stores an accepted one) when that feature is on.
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
    let registry = complete_registry(publish)?;
    eprintln!("🚀 Uploading {} packages", items.len());
    for i in items {
        // Fixed: `&registry` had been corrupted into `®istry` (mojibake).
        let upload_result = upload(&registry, i);
        match upload_result {
            Ok(()) => (),
            Err(UploadError::AuthenticationError(msg)) => {
                // Registries often answer with an HTML error page; surface its <title>.
                let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
                let title = title_re
                    .captures(&msg)
                    .and_then(|c| c.get(1))
                    .map(|m| m.as_str());
                match title {
                    Some(title) => {
                        eprintln!("⛔ {title}");
                    }
                    None => eprintln!("⛔ Username and/or password are wrong"),
                }
                #[cfg(feature = "keyring")]
                {
                    // Delete the wrong password from the keyring
                    let old_username = registry.username;
                    match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
                        .and_then(|keyring| keyring.delete_password())
                    {
                        Ok(()) => {
                            eprintln!("🔑 Removed wrong password from keyring")
                        }
                        Err(keyring::Error::NoEntry)
                        | Err(keyring::Error::NoStorageAccess(_))
                        | Err(keyring::Error::PlatformFailure(_)) => {}
                        Err(err) => {
                            eprintln!("⚠️ Warning: Failed to remove password from keyring: {err}")
                        }
                    }
                }
                bail!("Username and/or password are possibly wrong");
            }
            Err(err) => {
                let filename = i.file_name().unwrap_or(i.as_os_str());
                if let UploadError::FileExistsError(_) = err {
                    if publish.skip_existing {
                        eprintln!(
                            "⚠️ Note: Skipping {filename:?} because it appears to already exist"
                        );
                        continue;
                    }
                }
                let filesize = fs::metadata(i)
                    .map(|x| ByteSize(x.len()).to_string())
                    .unwrap_or_else(|e| format!("Failed to get the filesize of {:?}: {}", &i, e));
                return Err(err).context(format!("💥 Failed to upload {filename:?} ({filesize})"));
            }
        }
    }
    eprintln!("✨ Packages uploaded successfully");
    #[cfg(feature = "keyring")]
    {
        // We know the password is correct, so we can save it in the keyring
        let username = registry.username.clone();
        let password = registry.password;
        match keyring::Entry::new(env!("CARGO_PKG_NAME"), &username)
            .and_then(|keyring| keyring.set_password(&password))
        {
            Ok(())
            | Err(keyring::Error::NoStorageAccess(_))
            | Err(keyring::Error::PlatformFailure(_)) => {}
            Err(err) => {
                eprintln!("⚠️ Warning: Failed to store the password in the keyring: {err:?}");
            }
        }
    }
    Ok(())
}
| ponse {
audience | identifier_name |
armature.py | from .animation import *
from .logger import *
from .package_level import *
import bpy
from math import radians
from mathutils import Vector, Matrix
DEFAULT_LIB_NAME = 'Same as filename'
#===============================================================================
class Bone:
    def __init__(self, bpyBone, bpySkeleton, bonesSoFar, skipLogging = False):
        """Wrap a Blender pose bone for export.

        bonesSoFar determines this bone's index and is searched for the
        parent bone's index; skipLogging is set by the pose-library exporter.
        """
        self.index = len(bonesSoFar)
        if not skipLogging:
            Logger.log('processing begun of bone: ' + bpyBone.name + ', index: '+ str(self.index), 2)
        self.name = bpyBone.name
        self.length = bpyBone.length
        self.posedBone = bpyBone # record so can be used by get_matrix, called by append_animation_pose
        self.parentBone = bpyBone.parent
        self.matrix_world = bpySkeleton.matrix_world
        self.matrix = self.get_bone_matrix()
        # parent must already be in bonesSoFar; root bones get index -1
        self.parentBoneIndex = Skeleton.get_bone(bpyBone.parent.name, bonesSoFar).index if bpyBone.parent else -1
        #animation
        if (bpySkeleton.animation_data):
            self.animation = Animation(ANIMATIONTYPE_MATRIX, ANIMATIONLOOPMODE_CYCLE, 'anim', '_matrix')
            self.previousBoneMatrix = None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def append_animation_pose(self, frame, force = False):
currentBoneMatrix = self.get_bone_matrix()
if (force or not same_matrix4(currentBoneMatrix, self.previousBoneMatrix)):
self.animation.frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_rest_pose(self, editBone):
        """Capture the bone's rest matrix and endpoints from its edit bone.

        Called while the armature is in edit mode (see Skeleton.__init__),
        since only edit bones carry the rest position.
        """
        self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
        # used to calc skeleton restDimensions
        self.restHead = editBone.head
        self.restTail = editBone.tail
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult = True):
|
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @staticmethod
    def get_matrix(bpyBone, matrix_world, doParentMult):
        """Return the bone's matrix converted to the export coordinate system.

        When the bone has a parent and doParentMult is True the result is
        expressed relative to the parent's converted matrix; otherwise it is
        the converted world-space matrix.
        """
        # SystemMatrix flips Z and rotates -90 degrees about X — presumably the
        # Blender -> target-engine axis conversion (confirm against importer).
        SystemMatrix = Matrix.Scale(-1, 4, Vector((0, 0, 1))) * Matrix.Rotation(radians(-90), 4, 'X')
        if (bpyBone.parent and doParentMult):
            return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
        else:
            return SystemMatrix * matrix_world * bpyBone.matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # assume the following JS variables have already been declared: skeleton, bone, animation
    def to_script_file(self, file_handler, indent):
        """Emit the JavaScript that recreates this bone and, when sampled, its animation."""
        # the parent is referenced by index into the already-emitted skeleton.bones array
        parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
        file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
        file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
        if hasattr(self, 'animation'):
            self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
            file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
    # skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
    def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
        """Build the export representation of a Blender armature.

        Collects Bone wrappers (optionally dropping IK helper bones), samples
        every action into per-bone animations, then switches to edit mode to
        capture rest poses and compute the skeleton's rest dimensions.
        """
        if not skipAnimations:
            Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
        self.name = bpySkeleton.name
        self.id = id
        self.bones = []
        if bpySkeleton.data.LibraryWithScene:
            self.libraryName = bpySkeleton.data.libraryName
            self.bpySkeleton = bpySkeleton # needed for call to build library
        for bone in bpySkeleton.pose.bones:
            if ignoreIKBones and Skeleton.isIkName(bone.name):
                if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
                continue
            self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
        if (bpySkeleton.animation_data and not skipAnimations):
            self.ranges = []
            frameOffset = 0
            for action in bpy.data.actions:
                # get the range / assigning the action to the object
                animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
                if animationRange is None:
                    continue
                Logger.log('processing action ' + animationRange.to_string(), 2)
                self.ranges.append(animationRange)
                nFrames = len(animationRange.frames_in)
                for idx in range(nFrames):
                    bpy.context.scene.frame_set(animationRange.frames_in[idx])
                    # always key the first and last frame of each action
                    firstOrLast = idx == 0 or idx == nFrames - 1
                    for bone in self.bones:
                        bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
                frameOffset = animationRange.frame_end
        # mode_set's only work when there is an active object, switch bones to edit mode to rest position
        scene.objects.active = bpySkeleton
        bpy.ops.object.mode_set(mode='EDIT')
        # you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
        for editBone in bpySkeleton.data.edit_bones:
            for myBoneObj in self.bones:
                if editBone.name == myBoneObj.name:
                    myBoneObj.set_rest_pose(editBone)
                    break
        self.dimensions = self.getDimensions()
        bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # used in PoseLibExporter; assume skeletion is the active object
    def getPose(self, idx):
        """Apply pose-library entry idx and return [name, matrix] pairs for all bones."""
        # ensure pose mode, select all bones, clear tranforms, apply pose
        bpy.ops.object.mode_set(mode='POSE')
        bpy.ops.pose.select_all(action='SELECT')
        bpy.ops.pose.transforms_clear()
        bpy.ops.poselib.apply_pose(pose_index = idx)
        ret = []
        for bone in self.bones:
            ret.append([bone.name, bone.get_bone_matrix()])
        return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear tranforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones could be being skipped, looking up index of bone in second pass of mesh required
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in skeleton')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: scene, skeleton, bone, animation
def to_script_file(self, file_handler, indent, logInBrowserConsole):
# specifying scene gets skeleton added to scene in constructor
if logInBrowserConsole: file_handler.write(indent + "_B.Tools.Log('defining skeleton: " + self.name + "');\n")
file_handler.write(indent + 'skeleton = new QI.Skeleton("' + self.name + '", "' + format_int(self.id) + '", scene);\n') # MUST be String for inline
file_handler.write(indent + 'skeleton.dimensionsAtRest = new _V(' + format_vector(self.dimensions) + ');\n')
for bone in self.bones:
bone.to_script_file(file_handler, indent)
if hasattr(self, 'libraryName'):
file_handler.write(indent +'skeleton.assignPoseLibrary("' + self.libraryName + '");\n')
if hasattr(self, 'ranges'):
for range in self.ranges:
range.to_script_file(file_handler, indent, 'skeleton')
#===============================================================================
# determine all the meshes which are controlled by skeleton, called also by pose_lib
def getMeshesForRig(scene, skeleton, prepForShapekeys = False):
meshes = []
for object in [object for object in scene.objects]:
if object.type == 'MESH' and len(object.vertex_groups) > 0 and skeleton == object.find_armature():
meshes.append(object)
print('meshes with armature: ' + object.name)
# ensure that there is a Basis key
if prepForShapekeys and not object.data.shape_keys:
object.shape_key_add('Basis')
return meshes
#===============================================================================
bpy.types.Armature.libraryName = bpy.props.StringProperty(
name='Library name',
description='Allow the same library in JS to be multiple Blender libraries.',
default = DEFAULT_LIB_NAME
)
bpy.types.Armature.allSkelLibraries = bpy.props.BoolProperty(
name='Include all Blender Pose Libraries',
description='',
default = True
)
bpy.types.Armature.LibraryWithScene = bpy.props.BoolProperty(
name='Include with scene export',
description='',
default = False
)
bpy.types.Armature.shapeKeyName = bpy.props.StringProperty(
name='As Key',
description='Name of the key to use in meshes controlled',
default = 'key'
)
#===============================================================================
class SkeletonPanel(bpy.types.Panel):
bl_label = get_title()
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'data'
@classmethod
def poll(cls, context):
ob = context.object
return ob is not None and isinstance(ob.data, bpy.types.Armature)
def draw(self, context):
ob = context.object
layout = self.layout
box1 = layout.box()
box1.label(text='Export to QI Pose Library:')
box1.prop(ob.data, 'libraryName')
box1.prop(ob.data, 'allSkelLibraries')
box1.prop(ob.data, 'LibraryWithScene')
box1.operator('tob.exportposes')
box2 = layout.box()
box2.label(text='Shape Keys:')
box2.operator('tob.poselibtoshapekeys')
inner_box = box2.box()
inner_box.label(text='Current pose as a shape key')
inner_box.prop(ob.data, 'shapeKeyName')
inner_box.operator('tob.posetoshapekey')
| return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult) | identifier_body |
armature.py | from .animation import *
from .logger import *
from .package_level import *
import bpy
from math import radians
from mathutils import Vector, Matrix
DEFAULT_LIB_NAME = 'Same as filename'
#===============================================================================
class Bone:
def __init__(self, bpyBone, bpySkeleton, bonesSoFar, skipLogging = False):
self.index = len(bonesSoFar)
if not skipLogging:
Logger.log('processing begun of bone: ' + bpyBone.name + ', index: '+ str(self.index), 2)
self.name = bpyBone.name
self.length = bpyBone.length
self.posedBone = bpyBone # record so can be used by get_matrix, called by append_animation_pose
self.parentBone = bpyBone.parent
self.matrix_world = bpySkeleton.matrix_world
self.matrix = self.get_bone_matrix()
self.parentBoneIndex = Skeleton.get_bone(bpyBone.parent.name, bonesSoFar).index if bpyBone.parent else -1
#animation
if (bpySkeleton.animation_data):
|
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def append_animation_pose(self, frame, force = False):
currentBoneMatrix = self.get_bone_matrix()
if (force or not same_matrix4(currentBoneMatrix, self.previousBoneMatrix)):
self.animation.frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_rest_pose(self, editBone):
self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
# used to calc skeleton restDimensions
self.restHead = editBone.head
self.restTail = editBone.tail
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult = True):
return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_matrix(bpyBone, matrix_world, doParentMult):
SystemMatrix = Matrix.Scale(-1, 4, Vector((0, 0, 1))) * Matrix.Rotation(radians(-90), 4, 'X')
if (bpyBone.parent and doParentMult):
return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
else:
return SystemMatrix * matrix_world * bpyBone.matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: skeleton, bone, animation
def to_script_file(self, file_handler, indent):
parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
if hasattr(self, 'animation'):
self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
# skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
if not skipAnimations:
Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
self.name = bpySkeleton.name
self.id = id
self.bones = []
if bpySkeleton.data.LibraryWithScene:
self.libraryName = bpySkeleton.data.libraryName
self.bpySkeleton = bpySkeleton # needed for call to build library
for bone in bpySkeleton.pose.bones:
if ignoreIKBones and Skeleton.isIkName(bone.name):
if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
if (bpySkeleton.animation_data and not skipAnimations):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Logger.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set's only work when there is an active object, switch bones to edit mode to rest position
scene.objects.active = bpySkeleton
bpy.ops.object.mode_set(mode='EDIT')
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in bpySkeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
self.dimensions = self.getDimensions()
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter; assume skeletion is the active object
def getPose(self, idx):
# ensure pose mode, select all bones, clear tranforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.poselib.apply_pose(pose_index = idx)
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear tranforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones could be being skipped, looking up index of bone in second pass of mesh required
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in skeleton')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: scene, skeleton, bone, animation
def to_script_file(self, file_handler, indent, logInBrowserConsole):
# specifying scene gets skeleton added to scene in constructor
if logInBrowserConsole: file_handler.write(indent + "_B.Tools.Log('defining skeleton: " + self.name + "');\n")
file_handler.write(indent + 'skeleton = new QI.Skeleton("' + self.name + '", "' + format_int(self.id) + '", scene);\n') # MUST be String for inline
file_handler.write(indent + 'skeleton.dimensionsAtRest = new _V(' + format_vector(self.dimensions) + ');\n')
for bone in self.bones:
bone.to_script_file(file_handler, indent)
if hasattr(self, 'libraryName'):
file_handler.write(indent +'skeleton.assignPoseLibrary("' + self.libraryName + '");\n')
if hasattr(self, 'ranges'):
for range in self.ranges:
range.to_script_file(file_handler, indent, 'skeleton')
#===============================================================================
# determine all the meshes which are controlled by skeleton, called also by pose_lib
def getMeshesForRig(scene, skeleton, prepForShapekeys = False):
meshes = []
for object in [object for object in scene.objects]:
if object.type == 'MESH' and len(object.vertex_groups) > 0 and skeleton == object.find_armature():
meshes.append(object)
print('meshes with armature: ' + object.name)
# ensure that there is a Basis key
if prepForShapekeys and not object.data.shape_keys:
object.shape_key_add('Basis')
return meshes
#===============================================================================
bpy.types.Armature.libraryName = bpy.props.StringProperty(
name='Library name',
description='Allow the same library in JS to be multiple Blender libraries.',
default = DEFAULT_LIB_NAME
)
bpy.types.Armature.allSkelLibraries = bpy.props.BoolProperty(
name='Include all Blender Pose Libraries',
description='',
default = True
)
bpy.types.Armature.LibraryWithScene = bpy.props.BoolProperty(
name='Include with scene export',
description='',
default = False
)
bpy.types.Armature.shapeKeyName = bpy.props.StringProperty(
name='As Key',
description='Name of the key to use in meshes controlled',
default = 'key'
)
#===============================================================================
class SkeletonPanel(bpy.types.Panel):
bl_label = get_title()
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'data'
@classmethod
def poll(cls, context):
ob = context.object
return ob is not None and isinstance(ob.data, bpy.types.Armature)
def draw(self, context):
ob = context.object
layout = self.layout
box1 = layout.box()
box1.label(text='Export to QI Pose Library:')
box1.prop(ob.data, 'libraryName')
box1.prop(ob.data, 'allSkelLibraries')
box1.prop(ob.data, 'LibraryWithScene')
box1.operator('tob.exportposes')
box2 = layout.box()
box2.label(text='Shape Keys:')
box2.operator('tob.poselibtoshapekeys')
inner_box = box2.box()
inner_box.label(text='Current pose as a shape key')
inner_box.prop(ob.data, 'shapeKeyName')
inner_box.operator('tob.posetoshapekey')
| self.animation = Animation(ANIMATIONTYPE_MATRIX, ANIMATIONLOOPMODE_CYCLE, 'anim', '_matrix')
self.previousBoneMatrix = None | conditional_block |
armature.py | from .animation import *
from .logger import *
from .package_level import *
import bpy
from math import radians
from mathutils import Vector, Matrix
DEFAULT_LIB_NAME = 'Same as filename'
#===============================================================================
class Bone:
def __init__(self, bpyBone, bpySkeleton, bonesSoFar, skipLogging = False):
self.index = len(bonesSoFar)
if not skipLogging:
Logger.log('processing begun of bone: ' + bpyBone.name + ', index: '+ str(self.index), 2)
self.name = bpyBone.name
self.length = bpyBone.length
self.posedBone = bpyBone # record so can be used by get_matrix, called by append_animation_pose
self.parentBone = bpyBone.parent
self.matrix_world = bpySkeleton.matrix_world
self.matrix = self.get_bone_matrix()
self.parentBoneIndex = Skeleton.get_bone(bpyBone.parent.name, bonesSoFar).index if bpyBone.parent else -1
#animation
if (bpySkeleton.animation_data):
self.animation = Animation(ANIMATIONTYPE_MATRIX, ANIMATIONLOOPMODE_CYCLE, 'anim', '_matrix')
self.previousBoneMatrix = None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def append_animation_pose(self, frame, force = False):
currentBoneMatrix = self.get_bone_matrix()
if (force or not same_matrix4(currentBoneMatrix, self.previousBoneMatrix)):
self.animation.frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_rest_pose(self, editBone):
self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
# used to calc skeleton restDimensions
self.restHead = editBone.head
self.restTail = editBone.tail
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult = True):
return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def | (bpyBone, matrix_world, doParentMult):
SystemMatrix = Matrix.Scale(-1, 4, Vector((0, 0, 1))) * Matrix.Rotation(radians(-90), 4, 'X')
if (bpyBone.parent and doParentMult):
return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
else:
return SystemMatrix * matrix_world * bpyBone.matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: skeleton, bone, animation
def to_script_file(self, file_handler, indent):
parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
if hasattr(self, 'animation'):
self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
# skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
if not skipAnimations:
Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
self.name = bpySkeleton.name
self.id = id
self.bones = []
if bpySkeleton.data.LibraryWithScene:
self.libraryName = bpySkeleton.data.libraryName
self.bpySkeleton = bpySkeleton # needed for call to build library
for bone in bpySkeleton.pose.bones:
if ignoreIKBones and Skeleton.isIkName(bone.name):
if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
if (bpySkeleton.animation_data and not skipAnimations):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Logger.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set's only work when there is an active object, switch bones to edit mode to rest position
scene.objects.active = bpySkeleton
bpy.ops.object.mode_set(mode='EDIT')
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in bpySkeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
self.dimensions = self.getDimensions()
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter; assume skeletion is the active object
def getPose(self, idx):
# ensure pose mode, select all bones, clear tranforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.poselib.apply_pose(pose_index = idx)
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear tranforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones could be being skipped, looking up index of bone in second pass of mesh required
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in skeleton')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: scene, skeleton, bone, animation
def to_script_file(self, file_handler, indent, logInBrowserConsole):
# specifying scene gets skeleton added to scene in constructor
if logInBrowserConsole: file_handler.write(indent + "_B.Tools.Log('defining skeleton: " + self.name + "');\n")
file_handler.write(indent + 'skeleton = new QI.Skeleton("' + self.name + '", "' + format_int(self.id) + '", scene);\n') # MUST be String for inline
file_handler.write(indent + 'skeleton.dimensionsAtRest = new _V(' + format_vector(self.dimensions) + ');\n')
for bone in self.bones:
bone.to_script_file(file_handler, indent)
if hasattr(self, 'libraryName'):
file_handler.write(indent +'skeleton.assignPoseLibrary("' + self.libraryName + '");\n')
if hasattr(self, 'ranges'):
for range in self.ranges:
range.to_script_file(file_handler, indent, 'skeleton')
#===============================================================================
# determine all the meshes which are controlled by skeleton, called also by pose_lib
def getMeshesForRig(scene, skeleton, prepForShapekeys = False):
meshes = []
for object in [object for object in scene.objects]:
if object.type == 'MESH' and len(object.vertex_groups) > 0 and skeleton == object.find_armature():
meshes.append(object)
print('meshes with armature: ' + object.name)
# ensure that there is a Basis key
if prepForShapekeys and not object.data.shape_keys:
object.shape_key_add('Basis')
return meshes
#===============================================================================
bpy.types.Armature.libraryName = bpy.props.StringProperty(
name='Library name',
description='Allow the same library in JS to be multiple Blender libraries.',
default = DEFAULT_LIB_NAME
)
bpy.types.Armature.allSkelLibraries = bpy.props.BoolProperty(
name='Include all Blender Pose Libraries',
description='',
default = True
)
bpy.types.Armature.LibraryWithScene = bpy.props.BoolProperty(
name='Include with scene export',
description='',
default = False
)
bpy.types.Armature.shapeKeyName = bpy.props.StringProperty(
name='As Key',
description='Name of the key to use in meshes controlled',
default = 'key'
)
#===============================================================================
class SkeletonPanel(bpy.types.Panel):
bl_label = get_title()
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'data'
@classmethod
def poll(cls, context):
ob = context.object
return ob is not None and isinstance(ob.data, bpy.types.Armature)
def draw(self, context):
ob = context.object
layout = self.layout
box1 = layout.box()
box1.label(text='Export to QI Pose Library:')
box1.prop(ob.data, 'libraryName')
box1.prop(ob.data, 'allSkelLibraries')
box1.prop(ob.data, 'LibraryWithScene')
box1.operator('tob.exportposes')
box2 = layout.box()
box2.label(text='Shape Keys:')
box2.operator('tob.poselibtoshapekeys')
inner_box = box2.box()
inner_box.label(text='Current pose as a shape key')
inner_box.prop(ob.data, 'shapeKeyName')
inner_box.operator('tob.posetoshapekey')
| get_matrix | identifier_name |
armature.py | from .animation import *
from .logger import *
from .package_level import *
import bpy
from math import radians
from mathutils import Vector, Matrix
DEFAULT_LIB_NAME = 'Same as filename'
#===============================================================================
class Bone:
def __init__(self, bpyBone, bpySkeleton, bonesSoFar, skipLogging = False):
self.index = len(bonesSoFar)
if not skipLogging:
Logger.log('processing begun of bone: ' + bpyBone.name + ', index: '+ str(self.index), 2)
self.name = bpyBone.name
self.length = bpyBone.length
self.posedBone = bpyBone # record so can be used by get_matrix, called by append_animation_pose
self.parentBone = bpyBone.parent
self.matrix_world = bpySkeleton.matrix_world
self.matrix = self.get_bone_matrix()
self.parentBoneIndex = Skeleton.get_bone(bpyBone.parent.name, bonesSoFar).index if bpyBone.parent else -1
#animation
if (bpySkeleton.animation_data):
self.animation = Animation(ANIMATIONTYPE_MATRIX, ANIMATIONLOOPMODE_CYCLE, 'anim', '_matrix')
self.previousBoneMatrix = None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def append_animation_pose(self, frame, force = False):
currentBoneMatrix = self.get_bone_matrix()
if (force or not same_matrix4(currentBoneMatrix, self.previousBoneMatrix)):
self.animation.frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_rest_pose(self, editBone):
self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
# used to calc skeleton restDimensions
self.restHead = editBone.head
self.restTail = editBone.tail
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult = True):
return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_matrix(bpyBone, matrix_world, doParentMult):
SystemMatrix = Matrix.Scale(-1, 4, Vector((0, 0, 1))) * Matrix.Rotation(radians(-90), 4, 'X')
if (bpyBone.parent and doParentMult):
return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
else:
return SystemMatrix * matrix_world * bpyBone.matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: skeleton, bone, animation
def to_script_file(self, file_handler, indent):
parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
if hasattr(self, 'animation'):
self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
# skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
if not skipAnimations:
Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
self.name = bpySkeleton.name
self.id = id
self.bones = []
if bpySkeleton.data.LibraryWithScene:
self.libraryName = bpySkeleton.data.libraryName
self.bpySkeleton = bpySkeleton # needed for call to build library
for bone in bpySkeleton.pose.bones:
if ignoreIKBones and Skeleton.isIkName(bone.name):
if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
if (bpySkeleton.animation_data and not skipAnimations):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Logger.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set's only work when there is an active object, switch bones to edit mode to rest position
scene.objects.active = bpySkeleton
bpy.ops.object.mode_set(mode='EDIT')
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in bpySkeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
self.dimensions = self.getDimensions()
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter; assume skeletion is the active object
def getPose(self, idx):
# ensure pose mode, select all bones, clear tranforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.poselib.apply_pose(pose_index = idx)
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear tranforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones could be being skipped, looking up index of bone in second pass of mesh required
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in skeleton')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: scene, skeleton, bone, animation
def to_script_file(self, file_handler, indent, logInBrowserConsole):
# specifying scene gets skeleton added to scene in constructor
if logInBrowserConsole: file_handler.write(indent + "_B.Tools.Log('defining skeleton: " + self.name + "');\n")
file_handler.write(indent + 'skeleton = new QI.Skeleton("' + self.name + '", "' + format_int(self.id) + '", scene);\n') # MUST be String for inline
file_handler.write(indent + 'skeleton.dimensionsAtRest = new _V(' + format_vector(self.dimensions) + ');\n')
for bone in self.bones:
bone.to_script_file(file_handler, indent)
if hasattr(self, 'libraryName'):
file_handler.write(indent +'skeleton.assignPoseLibrary("' + self.libraryName + '");\n')
if hasattr(self, 'ranges'):
for range in self.ranges:
range.to_script_file(file_handler, indent, 'skeleton')
#===============================================================================
# determine all the meshes which are controlled by skeleton, called also by pose_lib | def getMeshesForRig(scene, skeleton, prepForShapekeys = False):
meshes = []
for object in [object for object in scene.objects]:
if object.type == 'MESH' and len(object.vertex_groups) > 0 and skeleton == object.find_armature():
meshes.append(object)
print('meshes with armature: ' + object.name)
# ensure that there is a Basis key
if prepForShapekeys and not object.data.shape_keys:
object.shape_key_add('Basis')
return meshes
#===============================================================================
bpy.types.Armature.libraryName = bpy.props.StringProperty(
name='Library name',
description='Allow the same library in JS to be multiple Blender libraries.',
default = DEFAULT_LIB_NAME
)
bpy.types.Armature.allSkelLibraries = bpy.props.BoolProperty(
name='Include all Blender Pose Libraries',
description='',
default = True
)
bpy.types.Armature.LibraryWithScene = bpy.props.BoolProperty(
name='Include with scene export',
description='',
default = False
)
bpy.types.Armature.shapeKeyName = bpy.props.StringProperty(
name='As Key',
description='Name of the key to use in meshes controlled',
default = 'key'
)
#===============================================================================
class SkeletonPanel(bpy.types.Panel):
bl_label = get_title()
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'data'
@classmethod
def poll(cls, context):
ob = context.object
return ob is not None and isinstance(ob.data, bpy.types.Armature)
def draw(self, context):
ob = context.object
layout = self.layout
box1 = layout.box()
box1.label(text='Export to QI Pose Library:')
box1.prop(ob.data, 'libraryName')
box1.prop(ob.data, 'allSkelLibraries')
box1.prop(ob.data, 'LibraryWithScene')
box1.operator('tob.exportposes')
box2 = layout.box()
box2.label(text='Shape Keys:')
box2.operator('tob.poselibtoshapekeys')
inner_box = box2.box()
inner_box.label(text='Current pose as a shape key')
inner_box.prop(ob.data, 'shapeKeyName')
inner_box.operator('tob.posetoshapekey') | random_line_split | |
poller.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# jan 2014 bbb garden shield attempt
# AKA
'''
Sensors:
analog level sensor, pin AIN0
TMP102 i2c temperature sensor, address 0x48
(if add0 is grounded) or 0x49 (if pulled up)
Outputs:
Analog RGB LED strip
I2C display(?)
Pump Activate/Deactivate (GPIO pin)
Some measurements as of mid-March 2014:
Tank can be pumped for 15 minutes without sun exposure to liquid.
Seems like after 10 minutes of pumping, the autosiphon engages, though.
Tank takes about 17 minutes to drain from a 15-minute pump
11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1
8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1
7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1
'''
from __future__ import division
import Adafruit_SSD1306 as ssd
import Adafruit_BBIO.UART as uart
import Image
import ImageDraw
import ImageFont
# import Adafruit_GPIO.PWM as pwm
import Adafruit_BBIO.GPIO as gpio
import Adafruit_BBIO.ADC as adc
# import TMP102 as tmp102
import datetime
from dateutil.tz import tzlocal
import time
import serial
import atexit
from math import log
import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
| # temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thoght to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# fuck me, a1 is only up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':readings['bedTemp'],
'tankTemp':readings['tankTemp']
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def do_state_display():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
# Draw an ellipse.
# draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a rectangle.
# draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a triangle.
# draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
# x += shape_width+padding
# Draw an X.
# draw.line((x, bottom, x+shape_width, top), fill=255)
# draw.line((x, top, x+shape_width, bottom), fill=255)
# x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# so, what will state display be?
# I2C display of tank temp?
def do_pump_toggle():
print 'pump actuate'
'''
this should actually work like:
if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:
activate pump
else:
turn off pump
'''
if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
print 'within actuating timeframe'
# changed this to just pump for the first PUMP_DURATION minutes every hour
if(datetime.datetime.today().minute <= PUMP_DURATION):
print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
gpio.output(pumpPin,gpio.HIGH)
else:
print 'shutting off pump at %s' % datetime.datetime.today().minute
gpio.output(pumpPin,gpio.LOW)
else:
print 'it is the actuator quiet period, between 11pm and 6am'
gpio.output(pumpPin,gpio.LOW)
print 'starting sampling at'
print datetime.datetime.now(tzlocal())
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# adc.setup(thermistor1)
# adc.setup(thermistor2)
# adc.setup(photoPin)
adc.setup()
# uart.setup('UART2')
# print 'uart setup'
gpio.setup(pumpPin,gpio.OUT)
# t = tmp102.TMP102()
disp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)
disp.begin()
disp.clear()
disp.display()
# NOTE
# There is currently a bug in the ADC driver.
# You'll need to read the values twice
# in order to get the latest value.
# pwm.start(greenPin, 10.0, 2000.0)
# pwm.start(redPin, 10.0, 2000.0)
# pwm.start(bluePin, 10.0, 2000.0)
atexit.register(exit_handler)
while True:
try:
do_sensor_read()
except Exception, e:
print e
print 'sensor_read error!'
try:
do_db_update()
except Exception, e:
print e
print 'do_db_update error!'
try:
do_state_display()
# pass
except Exception, e:
print e
print 'do_state_display error!'
try:
do_pump_toggle()
except Exception, e:
print e
print 'do_pump_toggle error!'
print 'done with cycle, now waiting %s' % datetime.datetime.today()
time.sleep(interval) | random_line_split | |
poller.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# jan 2014 bbb garden shield attempt
# AKA
'''
Sensors:
analog level sensor, pin AIN0
TMP102 i2c temperature sensor, address 0x48
(if add0 is grounded) or 0x49 (if pulled up)
Outputs:
Analog RGB LED strip
I2C display(?)
Pump Activate/Deactivate (GPIO pin)
Some measurements as of mid-March 2014:
Tank can be pumped for 15 minutes without sun exposure to liquid.
Seems like after 10 minutes of pumping, the autosiphon engages, though.
Tank takes about 17 minutes to drain from a 15-minute pump
11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1
8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1
7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1
'''
from __future__ import division
import Adafruit_SSD1306 as ssd
import Adafruit_BBIO.UART as uart
import Image
import ImageDraw
import ImageFont
# import Adafruit_GPIO.PWM as pwm
import Adafruit_BBIO.GPIO as gpio
import Adafruit_BBIO.ADC as adc
# import TMP102 as tmp102
import datetime
from dateutil.tz import tzlocal
import time
import serial
import atexit
from math import log
import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
# temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thoght to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# fuck me, a1 is only up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':readings['bedTemp'],
'tankTemp':readings['tankTemp']
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def do_state_display():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
# Draw an ellipse.
# draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a rectangle.
# draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a triangle.
# draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
# x += shape_width+padding
# Draw an X.
# draw.line((x, bottom, x+shape_width, top), fill=255)
# draw.line((x, top, x+shape_width, bottom), fill=255)
# x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# so, what will state display be?
# I2C display of tank temp?
def do_pump_toggle():
print 'pump actuate'
'''
this should actually work like:
if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:
activate pump
else:
turn off pump
'''
if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
print 'within actuating timeframe'
# changed this to just pump for the first PUMP_DURATION minutes every hour
if(datetime.datetime.today().minute <= PUMP_DURATION):
|
else:
print 'shutting off pump at %s' % datetime.datetime.today().minute
gpio.output(pumpPin,gpio.LOW)
else:
print 'it is the actuator quiet period, between 11pm and 6am'
gpio.output(pumpPin,gpio.LOW)
print 'starting sampling at'
print datetime.datetime.now(tzlocal())
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# adc.setup(thermistor1)
# adc.setup(thermistor2)
# adc.setup(photoPin)
adc.setup()
# uart.setup('UART2')
# print 'uart setup'
gpio.setup(pumpPin,gpio.OUT)
# t = tmp102.TMP102()
disp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)
disp.begin()
disp.clear()
disp.display()
# NOTE
# There is currently a bug in the ADC driver.
# You'll need to read the values twice
# in order to get the latest value.
# pwm.start(greenPin, 10.0, 2000.0)
# pwm.start(redPin, 10.0, 2000.0)
# pwm.start(bluePin, 10.0, 2000.0)
atexit.register(exit_handler)
while True:
try:
do_sensor_read()
except Exception, e:
print e
print 'sensor_read error!'
try:
do_db_update()
except Exception, e:
print e
print 'do_db_update error!'
try:
do_state_display()
# pass
except Exception, e:
print e
print 'do_state_display error!'
try:
do_pump_toggle()
except Exception, e:
print e
print 'do_pump_toggle error!'
print 'done with cycle, now waiting %s' % datetime.datetime.today()
time.sleep(interval)
| print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
gpio.output(pumpPin,gpio.HIGH) | conditional_block |
poller.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# jan 2014 bbb garden shield attempt
# AKA
'''
Sensors:
analog level sensor, pin AIN0
TMP102 i2c temperature sensor, address 0x48
(if add0 is grounded) or 0x49 (if pulled up)
Outputs:
Analog RGB LED strip
I2C display(?)
Pump Activate/Deactivate (GPIO pin)
Some measurements as of mid-March 2014:
Tank can be pumped for 15 minutes without sun exposure to liquid.
Seems like after 10 minutes of pumping, the autosiphon engages, though.
Tank takes about 17 minutes to drain from a 15-minute pump
11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1
8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1
7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1
'''
from __future__ import division
import Adafruit_SSD1306 as ssd
import Adafruit_BBIO.UART as uart
import Image
import ImageDraw
import ImageFont
# import Adafruit_GPIO.PWM as pwm
import Adafruit_BBIO.GPIO as gpio
import Adafruit_BBIO.ADC as adc
# import TMP102 as tmp102
import datetime
from dateutil.tz import tzlocal
import time
import serial
import atexit
from math import log
import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
|
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# fuck me, a1 is only up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':readings['bedTemp'],
'tankTemp':readings['tankTemp']
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def do_state_display():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
# Draw an ellipse.
# draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a rectangle.
# draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a triangle.
# draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
# x += shape_width+padding
# Draw an X.
# draw.line((x, bottom, x+shape_width, top), fill=255)
# draw.line((x, top, x+shape_width, bottom), fill=255)
# x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# so, what will state display be?
# I2C display of tank temp?
def do_pump_toggle():
print 'pump actuate'
'''
this should actually work like:
if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:
activate pump
else:
turn off pump
'''
if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
print 'within actuating timeframe'
# changed this to just pump for the first PUMP_DURATION minutes every hour
if(datetime.datetime.today().minute <= PUMP_DURATION):
print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
gpio.output(pumpPin,gpio.HIGH)
else:
print 'shutting off pump at %s' % datetime.datetime.today().minute
gpio.output(pumpPin,gpio.LOW)
else:
print 'it is the actuator quiet period, between 11pm and 6am'
gpio.output(pumpPin,gpio.LOW)
print 'starting sampling at'
print datetime.datetime.now(tzlocal())
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# adc.setup(thermistor1)
# adc.setup(thermistor2)
# adc.setup(photoPin)
adc.setup()
# uart.setup('UART2')
# print 'uart setup'
gpio.setup(pumpPin,gpio.OUT)
# t = tmp102.TMP102()
disp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)
disp.begin()
disp.clear()
disp.display()
# NOTE
# There is currently a bug in the ADC driver.
# You'll need to read the values twice
# in order to get the latest value.
# pwm.start(greenPin, 10.0, 2000.0)
# pwm.start(redPin, 10.0, 2000.0)
# pwm.start(bluePin, 10.0, 2000.0)
atexit.register(exit_handler)
while True:
try:
do_sensor_read()
except Exception, e:
print e
print 'sensor_read error!'
try:
do_db_update()
except Exception, e:
print e
print 'do_db_update error!'
try:
do_state_display()
# pass
except Exception, e:
print e
print 'do_state_display error!'
try:
do_pump_toggle()
except Exception, e:
print e
print 'do_pump_toggle error!'
print 'done with cycle, now waiting %s' % datetime.datetime.today()
time.sleep(interval)
| print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
# temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thoght to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell | identifier_body |
poller.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# jan 2014 bbb garden shield attempt
# AKA
'''
Sensors:
analog level sensor, pin AIN0
TMP102 i2c temperature sensor, address 0x48
(if add0 is grounded) or 0x49 (if pulled up)
Outputs:
Analog RGB LED strip
I2C display(?)
Pump Activate/Deactivate (GPIO pin)
Some measurements as of mid-March 2014:
Tank can be pumped for 15 minutes without sun exposure to liquid.
Seems like after 10 minutes of pumping, the autosiphon engages, though.
Tank takes about 17 minutes to drain from a 15-minute pump
11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1
8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1
7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1
'''
from __future__ import division
import Adafruit_SSD1306 as ssd
import Adafruit_BBIO.UART as uart
import Image
import ImageDraw
import ImageFont
# import Adafruit_GPIO.PWM as pwm
import Adafruit_BBIO.GPIO as gpio
import Adafruit_BBIO.ADC as adc
# import TMP102 as tmp102
import datetime
from dateutil.tz import tzlocal
import time
import serial
import atexit
from math import log
import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
# temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thoght to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# fuck me, a1 is only up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':readings['bedTemp'],
'tankTemp':readings['tankTemp']
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def | ():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
# Draw an ellipse.
# draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a rectangle.
# draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a triangle.
# draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
# x += shape_width+padding
# Draw an X.
# draw.line((x, bottom, x+shape_width, top), fill=255)
# draw.line((x, top, x+shape_width, bottom), fill=255)
# x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# so, what will state display be?
# I2C display of tank temp?
def do_pump_toggle():
print 'pump actuate'
'''
this should actually work like:
if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:
activate pump
else:
turn off pump
'''
if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
print 'within actuating timeframe'
# changed this to just pump for the first PUMP_DURATION minutes every hour
if(datetime.datetime.today().minute <= PUMP_DURATION):
print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
gpio.output(pumpPin,gpio.HIGH)
else:
print 'shutting off pump at %s' % datetime.datetime.today().minute
gpio.output(pumpPin,gpio.LOW)
else:
print 'it is the actuator quiet period, between 11pm and 6am'
gpio.output(pumpPin,gpio.LOW)
print 'starting sampling at'
print datetime.datetime.now(tzlocal())
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# adc.setup(thermistor1)
# adc.setup(thermistor2)
# adc.setup(photoPin)
adc.setup()
# uart.setup('UART2')
# print 'uart setup'
gpio.setup(pumpPin,gpio.OUT)
# t = tmp102.TMP102()
disp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)
disp.begin()
disp.clear()
disp.display()
# NOTE
# There is currently a bug in the ADC driver.
# You'll need to read the values twice
# in order to get the latest value.
# pwm.start(greenPin, 10.0, 2000.0)
# pwm.start(redPin, 10.0, 2000.0)
# pwm.start(bluePin, 10.0, 2000.0)
atexit.register(exit_handler)
while True:
try:
do_sensor_read()
except Exception, e:
print e
print 'sensor_read error!'
try:
do_db_update()
except Exception, e:
print e
print 'do_db_update error!'
try:
do_state_display()
# pass
except Exception, e:
print e
print 'do_state_display error!'
try:
do_pump_toggle()
except Exception, e:
print e
print 'do_pump_toggle error!'
print 'done with cycle, now waiting %s' % datetime.datetime.today()
time.sleep(interval)
| do_state_display | identifier_name |
ddpg.py | """
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
from replay_buffer import ReplayBuffer
import os
# use correct gpu
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1" # use correct GPU
# ==========================
# Training Parameters
# ==========================
# Max training steps
MAX_EPISODES = 1000
# Max episode length
MAX_EP_STEPS = 200
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.01
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Discount factor
GAMMA = 0.99
# Soft target update param
TAU = 0.05
STATE_DIM = 4
ACTION_DIM = 1
ACTION_PROB_DIMS = 2
ACTION_BOUND = 1
ACTION_SPACE = [0, 1]
# ===========================
# Utility Parameters
# ===========================
# Render gym env during training
RENDER_ENV = True
# Use Gym Monitor
GYM_MONITOR_EN = True
# Gym environment
ENV_NAME = 'CartPole-v0'
# Directory for storing gym results
MONITOR_DIR = './results/gym_ddpg'
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/tf_ddpg'
RANDOM_SEED = 1234
# Size of replay buffer
BUFFER_SIZE = 10000
MINIBATCH_SIZE = 64
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
"""
Input to the network is the state, output is the action
under a deterministic policy.
The output layer activation is a tanh to keep the action
between -2 and 2
"""
def __init__(self, sess):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.a_prob_dim = ACTION_PROB_DIMS
self.action_bound = ACTION_BOUND
self.learning_rate = ACTOR_LEARNING_RATE
self.tau = TAU
# Actor Network
self.onnet_in_states, self.out = self.create_actor_network()
self.network_params = tf.trainable_variables()
# Target Network
self.target_inputs, self.target_out = self.create_actor_network()
self.target_network_params = tf.trainable_variables()[len(self.network_params):]
# Op for periodically updating target network with online network weights
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + \
tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# This gradient will be provided by the critic network
self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
self.actor_gradients = tf.gradients(self.out, self.network_params, -self.action_gradient)
# Optimization Op
self.optimize = tf.train.AdamOptimizer(self.learning_rate). \
apply_gradients(zip(self.actor_gradients, self.network_params))
self.num_trainable_vars = len(self.network_params) + len(self.target_network_params)
def create_actor_network(self):
in_states = tflearn.input_data(shape=[None, self.s_dim])
net = tflearn.fully_connected(in_states, 400, activation='relu')
net = tflearn.fully_connected(net, 300, activation='relu')
# Final layer weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
out_actions = tflearn.fully_connected(net, ACTION_PROB_DIMS, activation='softmax', weights_init=w_init)
return in_states, out_actions
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict={
self.onnet_in_states: inputs,
self.action_gradient: a_gradient
})
def predict(self, inp_states):
out_actions = self.sess.run(self.out, feed_dict={
self.onnet_in_states: inp_states
})
out_actions = out_actions[0]
#print("actor output actions", out_actions)
return out_actions
def predict_target(self, in_states):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: in_states
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_num_trainable_vars(self):
return self.num_trainable_vars
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, done_batch, s2_batch = \
replay_buffer.sample_batch(MINIBATCH_SIZE)
# action probs to actions # TODO how to deal with non-determinate policies
# convert actor.predict_target(s2_batch) to actions
# the problem is that critic expects actions to always be determinate, when in fact they are probab
# Calculate targets
# todo can we just feed real a and s batch here, no s2?
# fixme critic predict expects 1D actions not 2D probabilities
a_batch = np.reshape(a_batch, (len(a_batch), 1))
#print("sbshape", np.shape(s_batch), "\n a shape", np.shape(a_batch))
targnet_predicted_reward = critic.predict_target(s_batch, a_batch)
#targnet_predicted_reward = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# print("targnet prediction", targnet_predicted_reward) # this is a whole reward tensor!!
# actually, we mix observations with predictions by factor gamma
# fixme I think we need to get rid of this block. targ reward is single value?
obs_plus_predicted_rewards = []
for k in range(MINIBATCH_SIZE):
if done_batch[k]:
obs_plus_predicted_rewards.append(r_batch[k]) # final timestep is just the reward
else:
obs_plus_predicted_rewards.append(r_batch[k] + GAMMA * targnet_predicted_reward[k])
obs_plus_predicted_rewards = np.reshape(obs_plus_predicted_rewards, (len(obs_plus_predicted_rewards), 1))
# Update the critic given the targets
predicted_q_value, _ = critic.train(s_batch, a_batch, obs_plus_predicted_rewards)
#predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(observed_rewards, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
#a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_batch)
#grads = critic.action_gradients(s_batch, a_outs) # we aren't deterministic
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if done:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
# TODO checkwhich ep reward is being printed
print( # TODO replace maxq with something more interesting
'| Reward: %.2i' % int(ep_reward), " | Episode", i, \
'| Qmax: %.4f' % (ep_ave_max_q / float(j)))
break
def | (probabilities):
choice = int(np.random.choice(ACTION_SPACE, 1, p=probabilities))
return choice
def main(_):
with tf.Session() as sess:
# TODO: reduce network sizes. keep all states stop editing this ver, add dropout in successor
env = gym.make(ENV_NAME)
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess)
critic = CriticNetwork(sess, actor.get_num_trainable_vars())
env = gym.wrappers.Monitor(env, MONITOR_DIR, force=True)
train(sess, env, actor, critic)
if __name__ == '__main__':
tf.app.run() | choose_action | identifier_name |
ddpg.py | """
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
from replay_buffer import ReplayBuffer
import os
# use correct gpu
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1" # use correct GPU
# ==========================
# Training Parameters
# ==========================
# Max training steps
MAX_EPISODES = 1000
# Max episode length
MAX_EP_STEPS = 200
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.01
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Discount factor
GAMMA = 0.99
# Soft target update param
TAU = 0.05
STATE_DIM = 4
ACTION_DIM = 1
ACTION_PROB_DIMS = 2
ACTION_BOUND = 1
ACTION_SPACE = [0, 1]
# ===========================
# Utility Parameters
# ===========================
# Render gym env during training
RENDER_ENV = True
# Use Gym Monitor
GYM_MONITOR_EN = True
# Gym environment
ENV_NAME = 'CartPole-v0'
# Directory for storing gym results
MONITOR_DIR = './results/gym_ddpg'
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/tf_ddpg'
RANDOM_SEED = 1234
# Size of replay buffer
BUFFER_SIZE = 10000
MINIBATCH_SIZE = 64
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
"""
Input to the network is the state, output is the action
under a deterministic policy.
The output layer activation is a tanh to keep the action
between -2 and 2
"""
def __init__(self, sess):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.a_prob_dim = ACTION_PROB_DIMS
self.action_bound = ACTION_BOUND
self.learning_rate = ACTOR_LEARNING_RATE
self.tau = TAU
# Actor Network
self.onnet_in_states, self.out = self.create_actor_network()
self.network_params = tf.trainable_variables()
# Target Network
self.target_inputs, self.target_out = self.create_actor_network()
self.target_network_params = tf.trainable_variables()[len(self.network_params):]
# Op for periodically updating target network with online network weights
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + \
tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# This gradient will be provided by the critic network
self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
self.actor_gradients = tf.gradients(self.out, self.network_params, -self.action_gradient)
# Optimization Op
self.optimize = tf.train.AdamOptimizer(self.learning_rate). \
apply_gradients(zip(self.actor_gradients, self.network_params))
self.num_trainable_vars = len(self.network_params) + len(self.target_network_params)
def create_actor_network(self):
in_states = tflearn.input_data(shape=[None, self.s_dim])
net = tflearn.fully_connected(in_states, 400, activation='relu')
net = tflearn.fully_connected(net, 300, activation='relu')
# Final layer weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
out_actions = tflearn.fully_connected(net, ACTION_PROB_DIMS, activation='softmax', weights_init=w_init)
return in_states, out_actions
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict={
self.onnet_in_states: inputs,
self.action_gradient: a_gradient
})
def predict(self, inp_states):
out_actions = self.sess.run(self.out, feed_dict={
self.onnet_in_states: inp_states
})
out_actions = out_actions[0]
#print("actor output actions", out_actions)
return out_actions
def predict_target(self, in_states):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: in_states
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_num_trainable_vars(self):
return self.num_trainable_vars
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, done_batch, s2_batch = \
replay_buffer.sample_batch(MINIBATCH_SIZE)
# action probs to actions # TODO how to deal with non-determinate policies
# convert actor.predict_target(s2_batch) to actions
# the problem is that critic expects actions to always be determinate, when in fact they are probab
# Calculate targets
# todo can we just feed real a and s batch here, no s2?
# fixme critic predict expects 1D actions not 2D probabilities
a_batch = np.reshape(a_batch, (len(a_batch), 1))
#print("sbshape", np.shape(s_batch), "\n a shape", np.shape(a_batch))
targnet_predicted_reward = critic.predict_target(s_batch, a_batch)
#targnet_predicted_reward = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# print("targnet prediction", targnet_predicted_reward) # this is a whole reward tensor!!
# actually, we mix observations with predictions by factor gamma
# fixme I think we need to get rid of this block. targ reward is single value?
obs_plus_predicted_rewards = []
for k in range(MINIBATCH_SIZE):
if done_batch[k]:
obs_plus_predicted_rewards.append(r_batch[k]) # final timestep is just the reward
else:
obs_plus_predicted_rewards.append(r_batch[k] + GAMMA * targnet_predicted_reward[k])
obs_plus_predicted_rewards = np.reshape(obs_plus_predicted_rewards, (len(obs_plus_predicted_rewards), 1))
# Update the critic given the targets
predicted_q_value, _ = critic.train(s_batch, a_batch, obs_plus_predicted_rewards)
#predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(observed_rewards, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
#a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_batch)
#grads = critic.action_gradients(s_batch, a_outs) # we aren't deterministic
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if done:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
# TODO checkwhich ep reward is being printed
print( # TODO replace maxq with something more interesting
'| Reward: %.2i' % int(ep_reward), " | Episode", i, \
'| Qmax: %.4f' % (ep_ave_max_q / float(j)))
break
def choose_action(probabilities):
choice = int(np.random.choice(ACTION_SPACE, 1, p=probabilities))
return choice
def main(_):
|
if __name__ == '__main__':
tf.app.run() | with tf.Session() as sess:
# TODO: reduce network sizes. keep all states stop editing this ver, add dropout in successor
env = gym.make(ENV_NAME)
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess)
critic = CriticNetwork(sess, actor.get_num_trainable_vars())
env = gym.wrappers.Monitor(env, MONITOR_DIR, force=True)
train(sess, env, actor, critic) | identifier_body |
ddpg.py | """
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
from replay_buffer import ReplayBuffer
import os
# use correct gpu
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1" # use correct GPU
# ==========================
# Training Parameters
# ==========================
# Max training steps
MAX_EPISODES = 1000
# Max episode length
MAX_EP_STEPS = 200
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.01
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Discount factor
GAMMA = 0.99 | ACTION_PROB_DIMS = 2
ACTION_BOUND = 1
ACTION_SPACE = [0, 1]
# ===========================
# Utility Parameters
# ===========================
# Render gym env during training
RENDER_ENV = True
# Use Gym Monitor
GYM_MONITOR_EN = True
# Gym environment
ENV_NAME = 'CartPole-v0'
# Directory for storing gym results
MONITOR_DIR = './results/gym_ddpg'
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/tf_ddpg'
RANDOM_SEED = 1234
# Size of replay buffer
BUFFER_SIZE = 10000
MINIBATCH_SIZE = 64
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
"""
Input to the network is the state, output is the action
under a deterministic policy.
The output layer activation is a tanh to keep the action
between -2 and 2
"""
def __init__(self, sess):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.a_prob_dim = ACTION_PROB_DIMS
self.action_bound = ACTION_BOUND
self.learning_rate = ACTOR_LEARNING_RATE
self.tau = TAU
# Actor Network
self.onnet_in_states, self.out = self.create_actor_network()
self.network_params = tf.trainable_variables()
# Target Network
self.target_inputs, self.target_out = self.create_actor_network()
self.target_network_params = tf.trainable_variables()[len(self.network_params):]
# Op for periodically updating target network with online network weights
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + \
tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# This gradient will be provided by the critic network
self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
self.actor_gradients = tf.gradients(self.out, self.network_params, -self.action_gradient)
# Optimization Op
self.optimize = tf.train.AdamOptimizer(self.learning_rate). \
apply_gradients(zip(self.actor_gradients, self.network_params))
self.num_trainable_vars = len(self.network_params) + len(self.target_network_params)
def create_actor_network(self):
in_states = tflearn.input_data(shape=[None, self.s_dim])
net = tflearn.fully_connected(in_states, 400, activation='relu')
net = tflearn.fully_connected(net, 300, activation='relu')
# Final layer weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
out_actions = tflearn.fully_connected(net, ACTION_PROB_DIMS, activation='softmax', weights_init=w_init)
return in_states, out_actions
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict={
self.onnet_in_states: inputs,
self.action_gradient: a_gradient
})
def predict(self, inp_states):
out_actions = self.sess.run(self.out, feed_dict={
self.onnet_in_states: inp_states
})
out_actions = out_actions[0]
#print("actor output actions", out_actions)
return out_actions
def predict_target(self, in_states):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: in_states
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_num_trainable_vars(self):
return self.num_trainable_vars
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, done_batch, s2_batch = \
replay_buffer.sample_batch(MINIBATCH_SIZE)
# action probs to actions # TODO how to deal with non-determinate policies
# convert actor.predict_target(s2_batch) to actions
# the problem is that critic expects actions to always be determinate, when in fact they are probab
# Calculate targets
# todo can we just feed real a and s batch here, no s2?
# fixme critic predict expects 1D actions not 2D probabilities
a_batch = np.reshape(a_batch, (len(a_batch), 1))
#print("sbshape", np.shape(s_batch), "\n a shape", np.shape(a_batch))
targnet_predicted_reward = critic.predict_target(s_batch, a_batch)
#targnet_predicted_reward = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# print("targnet prediction", targnet_predicted_reward) # this is a whole reward tensor!!
# actually, we mix observations with predictions by factor gamma
# fixme I think we need to get rid of this block. targ reward is single value?
obs_plus_predicted_rewards = []
for k in range(MINIBATCH_SIZE):
if done_batch[k]:
obs_plus_predicted_rewards.append(r_batch[k]) # final timestep is just the reward
else:
obs_plus_predicted_rewards.append(r_batch[k] + GAMMA * targnet_predicted_reward[k])
obs_plus_predicted_rewards = np.reshape(obs_plus_predicted_rewards, (len(obs_plus_predicted_rewards), 1))
# Update the critic given the targets
predicted_q_value, _ = critic.train(s_batch, a_batch, obs_plus_predicted_rewards)
#predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(observed_rewards, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
#a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_batch)
#grads = critic.action_gradients(s_batch, a_outs) # we aren't deterministic
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if done:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
# TODO checkwhich ep reward is being printed
print( # TODO replace maxq with something more interesting
'| Reward: %.2i' % int(ep_reward), " | Episode", i, \
'| Qmax: %.4f' % (ep_ave_max_q / float(j)))
break
def choose_action(probabilities):
choice = int(np.random.choice(ACTION_SPACE, 1, p=probabilities))
return choice
def main(_):
with tf.Session() as sess:
# TODO: reduce network sizes. keep all states stop editing this ver, add dropout in successor
env = gym.make(ENV_NAME)
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess)
critic = CriticNetwork(sess, actor.get_num_trainable_vars())
env = gym.wrappers.Monitor(env, MONITOR_DIR, force=True)
train(sess, env, actor, critic)
if __name__ == '__main__':
tf.app.run() | # Soft target update param
TAU = 0.05
STATE_DIM = 4
ACTION_DIM = 1 | random_line_split |
ddpg.py | """
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
from replay_buffer import ReplayBuffer
import os
# use correct gpu
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1" # use correct GPU
# ==========================
# Training Parameters
# ==========================
# Max training steps
MAX_EPISODES = 1000
# Max episode length
MAX_EP_STEPS = 200
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.01
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Discount factor
GAMMA = 0.99
# Soft target update param
TAU = 0.05
STATE_DIM = 4
ACTION_DIM = 1
ACTION_PROB_DIMS = 2
ACTION_BOUND = 1
ACTION_SPACE = [0, 1]
# ===========================
# Utility Parameters
# ===========================
# Render gym env during training
RENDER_ENV = True
# Use Gym Monitor
GYM_MONITOR_EN = True
# Gym environment
ENV_NAME = 'CartPole-v0'
# Directory for storing gym results
MONITOR_DIR = './results/gym_ddpg'
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/tf_ddpg'
RANDOM_SEED = 1234
# Size of replay buffer
BUFFER_SIZE = 10000
MINIBATCH_SIZE = 64
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
"""
Input to the network is the state, output is the action
under a deterministic policy.
The output layer activation is a tanh to keep the action
between -2 and 2
"""
def __init__(self, sess):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.a_prob_dim = ACTION_PROB_DIMS
self.action_bound = ACTION_BOUND
self.learning_rate = ACTOR_LEARNING_RATE
self.tau = TAU
# Actor Network
self.onnet_in_states, self.out = self.create_actor_network()
self.network_params = tf.trainable_variables()
# Target Network
self.target_inputs, self.target_out = self.create_actor_network()
self.target_network_params = tf.trainable_variables()[len(self.network_params):]
# Op for periodically updating target network with online network weights
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + \
tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# This gradient will be provided by the critic network
self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
self.actor_gradients = tf.gradients(self.out, self.network_params, -self.action_gradient)
# Optimization Op
self.optimize = tf.train.AdamOptimizer(self.learning_rate). \
apply_gradients(zip(self.actor_gradients, self.network_params))
self.num_trainable_vars = len(self.network_params) + len(self.target_network_params)
def create_actor_network(self):
in_states = tflearn.input_data(shape=[None, self.s_dim])
net = tflearn.fully_connected(in_states, 400, activation='relu')
net = tflearn.fully_connected(net, 300, activation='relu')
# Final layer weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
out_actions = tflearn.fully_connected(net, ACTION_PROB_DIMS, activation='softmax', weights_init=w_init)
return in_states, out_actions
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict={
self.onnet_in_states: inputs,
self.action_gradient: a_gradient
})
def predict(self, inp_states):
out_actions = self.sess.run(self.out, feed_dict={
self.onnet_in_states: inp_states
})
out_actions = out_actions[0]
#print("actor output actions", out_actions)
return out_actions
def predict_target(self, in_states):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: in_states
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_num_trainable_vars(self):
return self.num_trainable_vars
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, done_batch, s2_batch = \
replay_buffer.sample_batch(MINIBATCH_SIZE)
# action probs to actions # TODO how to deal with non-determinate policies
# convert actor.predict_target(s2_batch) to actions
# the problem is that critic expects actions to always be determinate, when in fact they are probab
# Calculate targets
# todo can we just feed real a and s batch here, no s2?
# fixme critic predict expects 1D actions not 2D probabilities
a_batch = np.reshape(a_batch, (len(a_batch), 1))
#print("sbshape", np.shape(s_batch), "\n a shape", np.shape(a_batch))
targnet_predicted_reward = critic.predict_target(s_batch, a_batch)
#targnet_predicted_reward = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# print("targnet prediction", targnet_predicted_reward) # this is a whole reward tensor!!
# actually, we mix observations with predictions by factor gamma
# fixme I think we need to get rid of this block. targ reward is single value?
obs_plus_predicted_rewards = []
for k in range(MINIBATCH_SIZE):
if done_batch[k]:
obs_plus_predicted_rewards.append(r_batch[k]) # final timestep is just the reward
else:
obs_plus_predicted_rewards.append(r_batch[k] + GAMMA * targnet_predicted_reward[k])
obs_plus_predicted_rewards = np.reshape(obs_plus_predicted_rewards, (len(obs_plus_predicted_rewards), 1))
# Update the critic given the targets
predicted_q_value, _ = critic.train(s_batch, a_batch, obs_plus_predicted_rewards)
#predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(observed_rewards, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
#a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_batch)
#grads = critic.action_gradients(s_batch, a_outs) # we aren't deterministic
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if done:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
# TODO checkwhich ep reward is being printed
print( # TODO replace maxq with something more interesting
'| Reward: %.2i' % int(ep_reward), " | Episode", i, \
'| Qmax: %.4f' % (ep_ave_max_q / float(j)))
break
def choose_action(probabilities):
choice = int(np.random.choice(ACTION_SPACE, 1, p=probabilities))
return choice
def main(_):
with tf.Session() as sess:
# TODO: reduce network sizes. keep all states stop editing this ver, add dropout in successor
env = gym.make(ENV_NAME)
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess)
critic = CriticNetwork(sess, actor.get_num_trainable_vars())
env = gym.wrappers.Monitor(env, MONITOR_DIR, force=True)
train(sess, env, actor, critic)
if __name__ == '__main__':
| tf.app.run() | conditional_block | |
lab3.py | import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
|
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S*) (\S* *)" (\d{3}) (\S+)'
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: {}'.format(failed_logs.count())
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format((parsed_logs.count(), access_logs.count(), failed_logs.count()))
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: {}'.format((
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max()))
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more then 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
daysWithHosts = sorted(dailyHosts.map(lambda x: x[0]).collect())
hosts = [x[1] for x in sorted(dailyHosts.collect())]
print '{}'.format(hosts)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
dayAndHostTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dailyHostsList
dayReqs = access_logs.map(lambda x: (x.date_time.day,1)).reduceByKey(lambda a,b: a+b)
summed = dayReqs.reduceByKey(lambda a,b: a+b)
joined = dayReqs.join(dailyHosts)
avgDailyReqPerHost = joined.map(lambda x: (x[0],x[1][0]/x[1][1]))
avgDailyReqPerHostList = avgDailyReqPerHost.takeOrdered(30)
print 'Average number of daily requests per Hosts is {}'.format(avgDailyReqPerHostList)
avgDailyReqPerHost.cache()
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cache')
daysWithAvg = sorted(avgDailyReqPerHost.map(lambda x: x[0]).collect())
avgs = [x[1] for x in avgDailyReqPerHost.takeOrdered(30, key=lambda x: x[0])]
print '{}'.format(avgs)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
badRecords = (access_logs.filter(lambda x: x.response_code == 404))
print 'Found {} 404 URLs'.format(badRecords.count())
badRecords.cache()
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
badEndpoints = badRecords.map(lambda x: x.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: {}'.format(badUniqueEndpointsPick40)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
badEndpointsCountPairTuple = badRecords.map(lambda x: (x.endpoint,1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a,b: a+b)
print 'Top twenty {}'.format(badEndpointsSum)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, key= lambda x: -x[1])
print 'Top Twenty 404 URLs: {}'.format(badEndpointsTop20)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif', 44), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
errHostsCountPairTuple = badRecords.map(lambda x: (x.host,1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a,b: a+b)
errHostsTop25 = errHostsSum.takeOrdered(25, key= lambda x: -x[1])
print 'Top 25 hosts that generated errors: {}'.format(errHostsTop25)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
errDateCountPairTuple = badRecords.map(lambda x: (x.date_time.day,1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b: a+b)
#print '{}'.format(errDateSum.take(10))
errDateSorted = (errDateSum)
#print errDateSorted
errByDate = errDateSorted.takeOrdered(30)
print '404 Errors by day: {}'.format(errByDate)
errDateSorted.cache()
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
daysWithErrors404 = errDateSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByDay = [x[1] for x in errDateSorted.takeOrdered(30, key= lambda x: x[0])]
print errors404ByDay
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
topErrDate = errDateSorted.takeOrdered(5, key= lambda x: -x[1])
print 'Top Five dates for 404 requests: {}'.format(topErrDate)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
hourCountPairTuple = badRecords.map(lambda x: (x.date_time.hour,1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a+b)
hourRecordsSorted = hourRecordsSum
errHourList = hourRecordsSorted.takeOrdered(30)
print 'Top hours for 404 requests: {}'.format(errHourList)
hourRecordsSorted.cache()
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
hoursWithErrors404 = hourRecordsSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByHours = [x[1] for x in hourRecordsSorted.takeOrdered(30, key = lambda x: -x[0])]
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
| return (logline, 0) | conditional_block |
lab3.py | import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S*) (\S* *)" (\d{3}) (\S+)'
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
|
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: {}'.format((
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max()))
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more then 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
daysWithHosts = sorted(dailyHosts.map(lambda x: x[0]).collect())
hosts = [x[1] for x in sorted(dailyHosts.collect())]
print '{}'.format(hosts)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
dayAndHostTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dailyHostsList
dayReqs = access_logs.map(lambda x: (x.date_time.day,1)).reduceByKey(lambda a,b: a+b)
summed = dayReqs.reduceByKey(lambda a,b: a+b)
joined = dayReqs.join(dailyHosts)
avgDailyReqPerHost = joined.map(lambda x: (x[0],x[1][0]/x[1][1]))
avgDailyReqPerHostList = avgDailyReqPerHost.takeOrdered(30)
print 'Average number of daily requests per Hosts is {}'.format(avgDailyReqPerHostList)
avgDailyReqPerHost.cache()
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cache')
daysWithAvg = sorted(avgDailyReqPerHost.map(lambda x: x[0]).collect())
avgs = [x[1] for x in avgDailyReqPerHost.takeOrdered(30, key=lambda x: x[0])]
print '{}'.format(avgs)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
badRecords = (access_logs.filter(lambda x: x.response_code == 404))
print 'Found {} 404 URLs'.format(badRecords.count())
badRecords.cache()
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
badEndpoints = badRecords.map(lambda x: x.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: {}'.format(badUniqueEndpointsPick40)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
badEndpointsCountPairTuple = badRecords.map(lambda x: (x.endpoint,1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a,b: a+b)
print 'Top twenty {}'.format(badEndpointsSum)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, key= lambda x: -x[1])
print 'Top Twenty 404 URLs: {}'.format(badEndpointsTop20)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif', 44), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
errHostsCountPairTuple = badRecords.map(lambda x: (x.host,1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a,b: a+b)
errHostsTop25 = errHostsSum.takeOrdered(25, key= lambda x: -x[1])
print 'Top 25 hosts that generated errors: {}'.format(errHostsTop25)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
errDateCountPairTuple = badRecords.map(lambda x: (x.date_time.day,1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b: a+b)
#print '{}'.format(errDateSum.take(10))
errDateSorted = (errDateSum)
#print errDateSorted
errByDate = errDateSorted.takeOrdered(30)
print '404 Errors by day: {}'.format(errByDate)
errDateSorted.cache()
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
daysWithErrors404 = errDateSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByDay = [x[1] for x in errDateSorted.takeOrdered(30, key= lambda x: x[0])]
print errors404ByDay
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
topErrDate = errDateSorted.takeOrdered(5, key= lambda x: -x[1])
print 'Top Five dates for 404 requests: {}'.format(topErrDate)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
hourCountPairTuple = badRecords.map(lambda x: (x.date_time.hour,1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a+b)
hourRecordsSorted = hourRecordsSum
errHourList = hourRecordsSorted.takeOrdered(30)
print 'Top hours for 404 requests: {}'.format(errHourList)
hourRecordsSorted.cache()
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
hoursWithErrors404 = hourRecordsSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByHours = [x[1] for x in hourRecordsSorted.takeOrdered(30, key = lambda x: -x[0])]
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
| """ Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: {}'.format(failed_logs.count())
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format((parsed_logs.count(), access_logs.count(), failed_logs.count()))
return parsed_logs, access_logs, failed_logs | identifier_body |
lab3.py | import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S*) (\S* *)" (\d{3}) (\S+)'
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: {}'.format(failed_logs.count())
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format((parsed_logs.count(), access_logs.count(), failed_logs.count()))
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: {}'.format((
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max()))
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more then 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
daysWithHosts = sorted(dailyHosts.map(lambda x: x[0]).collect())
hosts = [x[1] for x in sorted(dailyHosts.collect())]
print '{}'.format(hosts)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
dayAndHostTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dailyHostsList
dayReqs = access_logs.map(lambda x: (x.date_time.day,1)).reduceByKey(lambda a,b: a+b)
summed = dayReqs.reduceByKey(lambda a,b: a+b)
joined = dayReqs.join(dailyHosts)
avgDailyReqPerHost = joined.map(lambda x: (x[0],x[1][0]/x[1][1]))
avgDailyReqPerHostList = avgDailyReqPerHost.takeOrdered(30)
print 'Average number of daily requests per Hosts is {}'.format(avgDailyReqPerHostList)
avgDailyReqPerHost.cache()
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cache')
daysWithAvg = sorted(avgDailyReqPerHost.map(lambda x: x[0]).collect())
avgs = [x[1] for x in avgDailyReqPerHost.takeOrdered(30, key=lambda x: x[0])]
print '{}'.format(avgs)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
badRecords = (access_logs.filter(lambda x: x.response_code == 404))
print 'Found {} 404 URLs'.format(badRecords.count())
badRecords.cache()
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
badEndpoints = badRecords.map(lambda x: x.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: {}'.format(badUniqueEndpointsPick40)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
badEndpointsCountPairTuple = badRecords.map(lambda x: (x.endpoint,1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a,b: a+b)
print 'Top twenty {}'.format(badEndpointsSum)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, key= lambda x: -x[1])
print 'Top Twenty 404 URLs: {}'.format(badEndpointsTop20)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif', 44), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
errHostsCountPairTuple = badRecords.map(lambda x: (x.host,1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a,b: a+b)
errHostsTop25 = errHostsSum.takeOrdered(25, key= lambda x: -x[1])
print 'Top 25 hosts that generated errors: {}'.format(errHostsTop25)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
errDateCountPairTuple = badRecords.map(lambda x: (x.date_time.day,1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b: a+b)
#print '{}'.format(errDateSum.take(10))
errDateSorted = (errDateSum)
#print errDateSorted
errByDate = errDateSorted.takeOrdered(30)
print '404 Errors by day: {}'.format(errByDate)
errDateSorted.cache()
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
daysWithErrors404 = errDateSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByDay = [x[1] for x in errDateSorted.takeOrdered(30, key= lambda x: x[0])]
print errors404ByDay
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
topErrDate = errDateSorted.takeOrdered(5, key= lambda x: -x[1])
print 'Top Five dates for 404 requests: {}'.format(topErrDate)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
hourCountPairTuple = badRecords.map(lambda x: (x.date_time.hour,1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a+b)
hourRecordsSorted = hourRecordsSum
errHourList = hourRecordsSorted.takeOrdered(30)
print 'Top hours for 404 requests: {}'.format(errHourList)
hourRecordsSorted.cache()
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList') | errors404ByHours = [x[1] for x in hourRecordsSorted.takeOrdered(30, key = lambda x: -x[0])]
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours) | Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
hoursWithErrors404 = hourRecordsSorted.map(lambda x: x[0]).takeOrdered(30) | random_line_split |
lab3.py | import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S*) (\S* *)" (\d{3}) (\S+)'
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def | ():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: {}'.format(failed_logs.count())
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format((parsed_logs.count(), access_logs.count(), failed_logs.count()))
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: {}'.format((
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max()))
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more then 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
daysWithHosts = sorted(dailyHosts.map(lambda x: x[0]).collect())
hosts = [x[1] for x in sorted(dailyHosts.collect())]
print '{}'.format(hosts)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
dayAndHostTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dailyHostsList
dayReqs = access_logs.map(lambda x: (x.date_time.day,1)).reduceByKey(lambda a,b: a+b)
summed = dayReqs.reduceByKey(lambda a,b: a+b)
joined = dayReqs.join(dailyHosts)
avgDailyReqPerHost = joined.map(lambda x: (x[0],x[1][0]/x[1][1]))
avgDailyReqPerHostList = avgDailyReqPerHost.takeOrdered(30)
print 'Average number of daily requests per Hosts is {}'.format(avgDailyReqPerHostList)
avgDailyReqPerHost.cache()
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cache')
daysWithAvg = sorted(avgDailyReqPerHost.map(lambda x: x[0]).collect())
avgs = [x[1] for x in avgDailyReqPerHost.takeOrdered(30, key=lambda x: x[0])]
print '{}'.format(avgs)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
badRecords = (access_logs.filter(lambda x: x.response_code == 404))
print 'Found {} 404 URLs'.format(badRecords.count())
badRecords.cache()
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
badEndpoints = badRecords.map(lambda x: x.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: {}'.format(badUniqueEndpointsPick40)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
badEndpointsCountPairTuple = badRecords.map(lambda x: (x.endpoint,1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a,b: a+b)
print 'Top twenty {}'.format(badEndpointsSum)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, key= lambda x: -x[1])
print 'Top Twenty 404 URLs: {}'.format(badEndpointsTop20)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif', 44), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
errHostsCountPairTuple = badRecords.map(lambda x: (x.host,1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a,b: a+b)
errHostsTop25 = errHostsSum.takeOrdered(25, key= lambda x: -x[1])
print 'Top 25 hosts that generated errors: {}'.format(errHostsTop25)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
errDateCountPairTuple = badRecords.map(lambda x: (x.date_time.day,1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b: a+b)
#print '{}'.format(errDateSum.take(10))
errDateSorted = (errDateSum)
#print errDateSorted
errByDate = errDateSorted.takeOrdered(30)
print '404 Errors by day: {}'.format(errByDate)
errDateSorted.cache()
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
daysWithErrors404 = errDateSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByDay = [x[1] for x in errDateSorted.takeOrdered(30, key= lambda x: x[0])]
print errors404ByDay
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
topErrDate = errDateSorted.takeOrdered(5, key= lambda x: -x[1])
print 'Top Five dates for 404 requests: {}'.format(topErrDate)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
hourCountPairTuple = badRecords.map(lambda x: (x.date_time.hour,1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a+b)
hourRecordsSorted = hourRecordsSum
errHourList = hourRecordsSorted.takeOrdered(30)
print 'Top hours for 404 requests: {}'.format(errHourList)
hourRecordsSorted.cache()
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
hoursWithErrors404 = hourRecordsSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByHours = [x[1] for x in hourRecordsSorted.takeOrdered(30, key = lambda x: -x[0])]
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
| parseLogs | identifier_name |
lib.rs | use std::ops::{Deref, DerefMut};
use std::any::Any;
use std::fmt::Debug;
use std::fmt;
pub use cod_node_derive::Node;
mod id;
mod context;
mod danger_zone;
#[cfg(test)]
mod test;
pub use id::ID;
use id::new_id;
use context::{CONTEXT, Context, PollReason, Replacement, IDMapUpdate};
use danger_zone::downcast_rc;
/// Can be changed to Arc later. However, the design is not thread-aware
/// when mutating. So appropriate !Send/!Syncs need to be defined before changing.
pub use std::rc::Rc as Rc;
pub use std::rc::Weak as Weak;
pub use im_rc as im;
#[derive(Clone, Debug)]
pub struct Header {
id: ID,
parent_id: Option<ID>,
}
impl Header {
pub fn new() -> Self {
Header {
id: new_id(),
parent_id: None
}
}
}
impl Default for Header {
fn default() -> Self {
Self::new()
}
}
pub trait Node: 'static {
fn header(&self) -> &Header;
fn header_mut(&mut self) -> &mut Header;
/// Optional: You may implement this method for your struct if it does something special.
///
/// For example, you would want to do this if `.clone()` does not actually clone
/// all the `Child` instances in the struct.
///
/// Cod will use this when updating the ancestors of a node that was mutated.
///
/// The implementaion should find the `Child` instance which corresponds to the
/// given ID, and call `.poll_mut()` on it. You should not do anything else
/// with the `Child`s, doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> |
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn get_ref(&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
// method that takes a new parent. similarly, there shold be a helper
// for moving Childs to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
}
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
/// Due to implementation details, this has to clone the root and all its
/// children.
pub fn new(root: &R) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
// this initiates a deep clone because mutation context is active
let root = Rc::new(root.clone());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
pub fn get_mut<'a, T: NodeClone + Clone>(&'a mut self, mut node: Rc<T>) -> MutRef<'a, R, T> {
Rc::make_mut(&mut node);
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
MutRef {
state: self,
node
}
}
pub fn ref_from_id(&self, id: ID) -> Option<Rc<dyn NodeClone>> {
Weak::upgrade(self.id_lookup.get(&id)?)
}
pub fn root(&self) -> &R {
&self.root
}
pub fn root_ref(&self) -> Rc<R> {
Rc::clone(&self.root)
}
fn apply_updates(&mut self, updates: impl Iterator<Item=IDMapUpdate>) {
for update in updates {
match update {
IDMapUpdate::Set(id, new_ref) => {
self.id_lookup.insert(id, new_ref);
},
IDMapUpdate::Erase(id) => {
self.id_lookup.remove(&id);
},
}
}
}
}
pub struct MutRef<'a, R: NodeClone + Clone, T: NodeClone> {
state: &'a mut State<R>,
node: Rc<T>,
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Deref for MutRef<'a, R, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.node
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> DerefMut for MutRef<'a, R, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the node Rc is mutably borrowed and
// made unique upon creation of self.
Rc::get_mut(&mut self.node).unwrap()
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Drop for MutRef<'a, R, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_mutate(c));
});
self.state.id_lookup.insert(self.node.header().id, Rc::downgrade(&self.node) as Weak<dyn NodeClone>);
let mut prev_node = Rc::clone(&self.node) as Rc<dyn NodeClone>;
while let Some(parent_id) = prev_node.header().parent_id {
let parent = Weak::upgrade(self.state.id_lookup.get(&parent_id).unwrap()).unwrap();
CONTEXT.with(|c| {
Context::set_replacement(c,
Replacement { id: prev_node.header().id, replace_with: Rc::clone(&prev_node) as Rc<dyn NodeClone> }
);
});
prev_node = parent.dyn_clone();
CONTEXT.with(|c| {
if !Context::finish_replacement(c) {
panic!("Cod: Could not find associated `Child` while traversing up")
}
});
}
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_replacement(c));
});
self.state.root = downcast_rc(prev_node).unwrap();
}
}
| {
Rc::new(self.clone())
} | identifier_body |
lib.rs | use std::ops::{Deref, DerefMut};
use std::any::Any;
use std::fmt::Debug;
use std::fmt;
pub use cod_node_derive::Node;
mod id;
mod context;
mod danger_zone;
#[cfg(test)]
mod test;
pub use id::ID;
use id::new_id;
use context::{CONTEXT, Context, PollReason, Replacement, IDMapUpdate};
use danger_zone::downcast_rc;
/// Can be changed to Arc later. However, the design is not thread-aware
/// when mutating. So appropriate !Send/!Syncs need to be defined before changing.
pub use std::rc::Rc as Rc;
pub use std::rc::Weak as Weak;
pub use im_rc as im;
#[derive(Clone, Debug)]
pub struct Header {
id: ID,
parent_id: Option<ID>,
}
impl Header {
pub fn new() -> Self {
Header {
id: new_id(),
parent_id: None
}
}
}
impl Default for Header {
fn default() -> Self {
Self::new()
}
}
pub trait Node: 'static {
fn header(&self) -> &Header;
fn header_mut(&mut self) -> &mut Header;
/// Optional: You may implement this method for your struct if it does something special.
///
/// For example, you would want to do this if `.clone()` does not actually clone
/// all the `Child` instances in the struct.
///
/// Cod will use this when updating the ancestors of a node that was mutated.
///
/// The implementaion should find the `Child` instance which corresponds to the
/// given ID, and call `.poll_mut()` on it. You should not do anything else
/// with the `Child`s, doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> {
Rc::new(self.clone())
}
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn | (&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
// method that takes a new parent. similarly, there shold be a helper
// for moving Childs to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
}
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
/// Due to implementation details, this has to clone the root and all its
/// children.
pub fn new(root: &R) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
// this initiates a deep clone because mutation context is active
let root = Rc::new(root.clone());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
pub fn get_mut<'a, T: NodeClone + Clone>(&'a mut self, mut node: Rc<T>) -> MutRef<'a, R, T> {
Rc::make_mut(&mut node);
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
MutRef {
state: self,
node
}
}
pub fn ref_from_id(&self, id: ID) -> Option<Rc<dyn NodeClone>> {
Weak::upgrade(self.id_lookup.get(&id)?)
}
pub fn root(&self) -> &R {
&self.root
}
pub fn root_ref(&self) -> Rc<R> {
Rc::clone(&self.root)
}
fn apply_updates(&mut self, updates: impl Iterator<Item=IDMapUpdate>) {
for update in updates {
match update {
IDMapUpdate::Set(id, new_ref) => {
self.id_lookup.insert(id, new_ref);
},
IDMapUpdate::Erase(id) => {
self.id_lookup.remove(&id);
},
}
}
}
}
pub struct MutRef<'a, R: NodeClone + Clone, T: NodeClone> {
state: &'a mut State<R>,
node: Rc<T>,
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Deref for MutRef<'a, R, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.node
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> DerefMut for MutRef<'a, R, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the node Rc is mutably borrowed and
// made unique upon creation of self.
Rc::get_mut(&mut self.node).unwrap()
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Drop for MutRef<'a, R, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_mutate(c));
});
self.state.id_lookup.insert(self.node.header().id, Rc::downgrade(&self.node) as Weak<dyn NodeClone>);
let mut prev_node = Rc::clone(&self.node) as Rc<dyn NodeClone>;
while let Some(parent_id) = prev_node.header().parent_id {
let parent = Weak::upgrade(self.state.id_lookup.get(&parent_id).unwrap()).unwrap();
CONTEXT.with(|c| {
Context::set_replacement(c,
Replacement { id: prev_node.header().id, replace_with: Rc::clone(&prev_node) as Rc<dyn NodeClone> }
);
});
prev_node = parent.dyn_clone();
CONTEXT.with(|c| {
if !Context::finish_replacement(c) {
panic!("Cod: Could not find associated `Child` while traversing up")
}
});
}
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_replacement(c));
});
self.state.root = downcast_rc(prev_node).unwrap();
}
}
| get_ref | identifier_name |
lib.rs | use std::ops::{Deref, DerefMut};
use std::any::Any;
use std::fmt::Debug;
use std::fmt;
pub use cod_node_derive::Node;
mod id;
mod context;
mod danger_zone;
#[cfg(test)]
mod test;
pub use id::ID;
use id::new_id;
use context::{CONTEXT, Context, PollReason, Replacement, IDMapUpdate};
use danger_zone::downcast_rc;
/// Can be changed to Arc later. However, the design is not thread-aware
/// when mutating. So appropriate !Send/!Syncs need to be defined before changing.
pub use std::rc::Rc as Rc;
pub use std::rc::Weak as Weak;
pub use im_rc as im;
#[derive(Clone, Debug)]
pub struct Header {
id: ID,
parent_id: Option<ID>,
}
impl Header {
pub fn new() -> Self {
Header {
id: new_id(),
parent_id: None
}
}
}
impl Default for Header {
fn default() -> Self {
Self::new()
}
}
pub trait Node: 'static {
fn header(&self) -> &Header;
fn header_mut(&mut self) -> &mut Header;
/// Optional: You may implement this method for your struct if it does something special.
///
/// For example, you would want to do this if `.clone()` does not actually clone
/// all the `Child` instances in the struct.
///
/// Cod will use this when updating the ancestors of a node that was mutated.
///
/// The implementaion should find the `Child` instance which corresponds to the
/// given ID, and call `.poll_mut()` on it. You should not do anything else
/// with the `Child`s, doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> {
Rc::new(self.clone())
}
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn get_ref(&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
// method that takes a new parent. similarly, there shold be a helper
// for moving Childs to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
} | // so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
/// Due to implementation details, this has to clone the root and all its
/// children.
pub fn new(root: &R) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
// this initiates a deep clone because mutation context is active
let root = Rc::new(root.clone());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
pub fn get_mut<'a, T: NodeClone + Clone>(&'a mut self, mut node: Rc<T>) -> MutRef<'a, R, T> {
Rc::make_mut(&mut node);
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
MutRef {
state: self,
node
}
}
pub fn ref_from_id(&self, id: ID) -> Option<Rc<dyn NodeClone>> {
Weak::upgrade(self.id_lookup.get(&id)?)
}
pub fn root(&self) -> &R {
&self.root
}
pub fn root_ref(&self) -> Rc<R> {
Rc::clone(&self.root)
}
fn apply_updates(&mut self, updates: impl Iterator<Item=IDMapUpdate>) {
for update in updates {
match update {
IDMapUpdate::Set(id, new_ref) => {
self.id_lookup.insert(id, new_ref);
},
IDMapUpdate::Erase(id) => {
self.id_lookup.remove(&id);
},
}
}
}
}
pub struct MutRef<'a, R: NodeClone + Clone, T: NodeClone> {
state: &'a mut State<R>,
node: Rc<T>,
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Deref for MutRef<'a, R, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.node
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> DerefMut for MutRef<'a, R, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the node Rc is mutably borrowed and
// made unique upon creation of self.
Rc::get_mut(&mut self.node).unwrap()
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Drop for MutRef<'a, R, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_mutate(c));
});
self.state.id_lookup.insert(self.node.header().id, Rc::downgrade(&self.node) as Weak<dyn NodeClone>);
let mut prev_node = Rc::clone(&self.node) as Rc<dyn NodeClone>;
while let Some(parent_id) = prev_node.header().parent_id {
let parent = Weak::upgrade(self.state.id_lookup.get(&parent_id).unwrap()).unwrap();
CONTEXT.with(|c| {
Context::set_replacement(c,
Replacement { id: prev_node.header().id, replace_with: Rc::clone(&prev_node) as Rc<dyn NodeClone> }
);
});
prev_node = parent.dyn_clone();
CONTEXT.with(|c| {
if !Context::finish_replacement(c) {
panic!("Cod: Could not find associated `Child` while traversing up")
}
});
}
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_replacement(c));
});
self.state.root = downcast_rc(prev_node).unwrap();
}
} | }
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data, | random_line_split |
bundle.py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import subprocess
import sys
from typing import Dict, Optional, Sequence
import monai.bundle
import torch
from monai.bundle import ConfigParser
from monai.data import partition_dataset
from monai.handlers import CheckpointLoader
from monailabel.config import settings
from monailabel.interfaces.datastore import Datastore
from monailabel.interfaces.tasks.train import TrainTask
from monailabel.utils.others.class_utils import unload_module
from monailabel.utils.others.generic import device_list, name_to_device
logger = logging.getLogger(__name__)
class BundleConstants:
def configs(self) -> Sequence[str]:
return ["train.json", "train.yaml"]
def multi_gpu_configs(self) -> Sequence[str]:
return ["multi_gpu_train.json", "multi_gpu_train.yaml"]
def metadata_json(self) -> str:
return "metadata.json"
def model_pytorch(self) -> str:
return "model.pt"
def key_device(self) -> str:
return "device"
def key_bundle_root(self) -> str:
return "bundle_root"
def key_network(self) -> str:
return "network"
def key_network_def(self) -> str:
return "network_def"
def key_train_trainer_max_epochs(self) -> str:
return "train#trainer#max_epochs"
def key_train_dataset_data(self) -> str:
return "train#dataset#data"
def key_train_handlers(self) -> str:
return "train#handlers"
def key_validate_dataset_data(self) -> str:
return "validate#dataset#data"
def key_tracking(self) -> str:
return "tracking"
def key_tracking_uri(self) -> str:
return "tracking_uri"
def | (self) -> str:
return "experiment_name"
def key_run_name(self) -> str:
return "run_name"
def key_displayable_configs(self) -> Sequence[str]:
return ["displayable_configs"]
class BundleTrainTask(TrainTask):
def __init__(
self,
path: str,
conf: Dict[str, str],
const: Optional[BundleConstants] = None,
enable_tracking=False,
model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param optiom to train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only support detection, classification label do not suppot in bundle yet,
# 0 is used for all positive boxes, wait for sync.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, instantiate=True)
overrides[k] = {c: request[c] for c in displayable_configs.keys()}
if tracking and tracking.lower() != "none":
overrides[self.const.key_tracking()] = tracking
if tracking_uri:
overrides[self.const.key_tracking_uri()] = tracking_uri
if tracking_experiment_name:
overrides[self.const.key_experiment_name()] = tracking_experiment_name
if tracking_run_name:
overrides[self.const.key_run_name()] = tracking_run_name
# external validation datalist supported through bundle itself (pass -1 in the request to use the same)
if val_ds is not None:
overrides[self.const.key_validate_dataset_data()] = val_ds
# allow derived class to update further overrides
self._update_overrides(overrides)
if multi_gpu:
config_paths = [
c
for c in self.const.multi_gpu_configs()
if os.path.exists(os.path.join(self.bundle_path, "configs", c))
]
if not config_paths:
logger.warning(
f"Ignore Multi-GPU Training; No multi-gpu train config {self.const.multi_gpu_configs()} exists"
)
return
train_path = os.path.join(self.bundle_path, "configs", "monailabel_train.json")
multi_gpu_train_path = os.path.join(self.bundle_path, "configs", config_paths[0])
logging_file = os.path.join(self.bundle_path, "configs", "logging.conf")
for k, v in overrides.items():
self.bundle_config.set(v, k)
ConfigParser.export_config_file(self.bundle_config.config, train_path, indent=2) # type: ignore
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = ",".join([str(g) for g in gpus])
logger.info(f"Using CUDA_VISIBLE_DEVICES: {env['CUDA_VISIBLE_DEVICES']}")
cmd = [
"torchrun",
"--standalone",
"--nnodes=1",
f"--nproc_per_node={len(gpus)}",
"-m",
"monai.bundle",
"run",
run_id, # run_id, user can pass the arg
"--meta_file",
self.bundle_metadata_path,
"--config_file",
f"['{train_path}','{multi_gpu_train_path}']",
"--logging_file",
logging_file,
]
if tracking:
cmd.extend(["--tracking", tracking])
if tracking_uri:
cmd.extend(["--tracking_uri", tracking_uri])
self.run_multi_gpu(request, cmd, env)
else:
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
self.run_single_gpu(request, overrides)
sys.path.remove(self.bundle_path)
logger.info("Training Finished....")
return {}
def run_single_gpu(self, request, overrides):
run_id = request.get("run_id", "run")
monai.bundle.run(
run_id=run_id,
init_id=None,
final_id=None,
meta_file=self.bundle_metadata_path,
config_file=self.bundle_config_path,
**overrides,
)
def run_multi_gpu(self, request, cmd, env):
self._run_command(cmd, env)
def _run_command(self, cmd, env):
logger.info(f"RUNNING COMMAND:: {cmd}")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, env=env)
while process.poll() is None:
line = process.stdout.readline()
line = line.rstrip()
if line:
print(line, flush=True)
logger.info(f"Return code: {process.returncode}")
process.stdout.close()
def _load_bundle_config(self, path, config):
bundle_config = ConfigParser()
bundle_config.read_config(config)
bundle_config.config.update({self.const.key_bundle_root(): path}) # type: ignore
return bundle_config
def _update_overrides(self, overrides):
return overrides
| key_experiment_name | identifier_name |
bundle.py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import subprocess
import sys
from typing import Dict, Optional, Sequence
import monai.bundle
import torch
from monai.bundle import ConfigParser
from monai.data import partition_dataset
from monai.handlers import CheckpointLoader
from monailabel.config import settings
from monailabel.interfaces.datastore import Datastore
from monailabel.interfaces.tasks.train import TrainTask
from monailabel.utils.others.class_utils import unload_module
from monailabel.utils.others.generic import device_list, name_to_device
logger = logging.getLogger(__name__)
class BundleConstants:
def configs(self) -> Sequence[str]:
return ["train.json", "train.yaml"]
def multi_gpu_configs(self) -> Sequence[str]:
return ["multi_gpu_train.json", "multi_gpu_train.yaml"]
def metadata_json(self) -> str:
return "metadata.json"
def model_pytorch(self) -> str:
return "model.pt"
def key_device(self) -> str:
return "device"
def key_bundle_root(self) -> str:
return "bundle_root"
def key_network(self) -> str:
return "network"
def key_network_def(self) -> str:
return "network_def"
def key_train_trainer_max_epochs(self) -> str:
return "train#trainer#max_epochs"
def key_train_dataset_data(self) -> str:
return "train#dataset#data"
def key_train_handlers(self) -> str:
return "train#handlers"
def key_validate_dataset_data(self) -> str:
return "validate#dataset#data"
def key_tracking(self) -> str:
return "tracking"
def key_tracking_uri(self) -> str:
return "tracking_uri"
def key_experiment_name(self) -> str:
return "experiment_name"
def key_run_name(self) -> str:
return "run_name"
def key_displayable_configs(self) -> Sequence[str]:
return ["displayable_configs"]
class BundleTrainTask(TrainTask):
def __init__(
self,
path: str,
conf: Dict[str, str],
const: Optional[BundleConstants] = None,
enable_tracking=False,
model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param optiom to train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only support detection, classification label do not suppot in bundle yet,
# 0 is used for all positive boxes, wait for sync.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, instantiate=True)
overrides[k] = {c: request[c] for c in displayable_configs.keys()}
if tracking and tracking.lower() != "none":
overrides[self.const.key_tracking()] = tracking
if tracking_uri:
overrides[self.const.key_tracking_uri()] = tracking_uri
if tracking_experiment_name:
|
if tracking_run_name:
overrides[self.const.key_run_name()] = tracking_run_name
# external validation datalist supported through bundle itself (pass -1 in the request to use the same)
if val_ds is not None:
overrides[self.const.key_validate_dataset_data()] = val_ds
# allow derived class to update further overrides
self._update_overrides(overrides)
if multi_gpu:
config_paths = [
c
for c in self.const.multi_gpu_configs()
if os.path.exists(os.path.join(self.bundle_path, "configs", c))
]
if not config_paths:
logger.warning(
f"Ignore Multi-GPU Training; No multi-gpu train config {self.const.multi_gpu_configs()} exists"
)
return
train_path = os.path.join(self.bundle_path, "configs", "monailabel_train.json")
multi_gpu_train_path = os.path.join(self.bundle_path, "configs", config_paths[0])
logging_file = os.path.join(self.bundle_path, "configs", "logging.conf")
for k, v in overrides.items():
self.bundle_config.set(v, k)
ConfigParser.export_config_file(self.bundle_config.config, train_path, indent=2) # type: ignore
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = ",".join([str(g) for g in gpus])
logger.info(f"Using CUDA_VISIBLE_DEVICES: {env['CUDA_VISIBLE_DEVICES']}")
cmd = [
"torchrun",
"--standalone",
"--nnodes=1",
f"--nproc_per_node={len(gpus)}",
"-m",
"monai.bundle",
"run",
run_id, # run_id, user can pass the arg
"--meta_file",
self.bundle_metadata_path,
"--config_file",
f"['{train_path}','{multi_gpu_train_path}']",
"--logging_file",
logging_file,
]
if tracking:
cmd.extend(["--tracking", tracking])
if tracking_uri:
cmd.extend(["--tracking_uri", tracking_uri])
self.run_multi_gpu(request, cmd, env)
else:
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
self.run_single_gpu(request, overrides)
sys.path.remove(self.bundle_path)
logger.info("Training Finished....")
return {}
def run_single_gpu(self, request, overrides):
run_id = request.get("run_id", "run")
monai.bundle.run(
run_id=run_id,
init_id=None,
final_id=None,
meta_file=self.bundle_metadata_path,
config_file=self.bundle_config_path,
**overrides,
)
def run_multi_gpu(self, request, cmd, env):
self._run_command(cmd, env)
def _run_command(self, cmd, env):
logger.info(f"RUNNING COMMAND:: {cmd}")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, env=env)
while process.poll() is None:
line = process.stdout.readline()
line = line.rstrip()
if line:
print(line, flush=True)
logger.info(f"Return code: {process.returncode}")
process.stdout.close()
def _load_bundle_config(self, path, config):
bundle_config = ConfigParser()
bundle_config.read_config(config)
bundle_config.config.update({self.const.key_bundle_root(): path}) # type: ignore
return bundle_config
def _update_overrides(self, overrides):
return overrides
| overrides[self.const.key_experiment_name()] = tracking_experiment_name | conditional_block |
bundle.py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import subprocess
import sys
from typing import Dict, Optional, Sequence
import monai.bundle
import torch
from monai.bundle import ConfigParser
from monai.data import partition_dataset
from monai.handlers import CheckpointLoader
from monailabel.config import settings
from monailabel.interfaces.datastore import Datastore
from monailabel.interfaces.tasks.train import TrainTask
from monailabel.utils.others.class_utils import unload_module
from monailabel.utils.others.generic import device_list, name_to_device
logger = logging.getLogger(__name__)
class BundleConstants:
def configs(self) -> Sequence[str]:
return ["train.json", "train.yaml"]
def multi_gpu_configs(self) -> Sequence[str]:
return ["multi_gpu_train.json", "multi_gpu_train.yaml"]
def metadata_json(self) -> str:
return "metadata.json"
def model_pytorch(self) -> str:
return "model.pt"
def key_device(self) -> str:
return "device"
def key_bundle_root(self) -> str:
return "bundle_root"
def key_network(self) -> str:
return "network"
def key_network_def(self) -> str:
return "network_def"
def key_train_trainer_max_epochs(self) -> str:
return "train#trainer#max_epochs"
def key_train_dataset_data(self) -> str:
return "train#dataset#data"
def key_train_handlers(self) -> str:
return "train#handlers"
def key_validate_dataset_data(self) -> str:
return "validate#dataset#data"
def key_tracking(self) -> str:
return "tracking"
def key_tracking_uri(self) -> str:
return "tracking_uri"
def key_experiment_name(self) -> str:
return "experiment_name"
def key_run_name(self) -> str:
return "run_name"
def key_displayable_configs(self) -> Sequence[str]:
return ["displayable_configs"]
class BundleTrainTask(TrainTask):
def __init__(
self,
path: str,
conf: Dict[str, str],
const: Optional[BundleConstants] = None,
enable_tracking=False,
model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param optiom to train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only support detection, classification label do not suppot in bundle yet,
# 0 is used for all positive boxes, wait for sync.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, instantiate=True)
overrides[k] = {c: request[c] for c in displayable_configs.keys()}
if tracking and tracking.lower() != "none":
overrides[self.const.key_tracking()] = tracking
if tracking_uri:
overrides[self.const.key_tracking_uri()] = tracking_uri
if tracking_experiment_name:
overrides[self.const.key_experiment_name()] = tracking_experiment_name
if tracking_run_name:
overrides[self.const.key_run_name()] = tracking_run_name
# external validation datalist supported through bundle itself (pass -1 in the request to use the same)
if val_ds is not None:
overrides[self.const.key_validate_dataset_data()] = val_ds
# allow derived class to update further overrides
self._update_overrides(overrides)
if multi_gpu:
config_paths = [
c
for c in self.const.multi_gpu_configs()
if os.path.exists(os.path.join(self.bundle_path, "configs", c))
] | )
return
train_path = os.path.join(self.bundle_path, "configs", "monailabel_train.json")
multi_gpu_train_path = os.path.join(self.bundle_path, "configs", config_paths[0])
logging_file = os.path.join(self.bundle_path, "configs", "logging.conf")
for k, v in overrides.items():
self.bundle_config.set(v, k)
ConfigParser.export_config_file(self.bundle_config.config, train_path, indent=2) # type: ignore
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = ",".join([str(g) for g in gpus])
logger.info(f"Using CUDA_VISIBLE_DEVICES: {env['CUDA_VISIBLE_DEVICES']}")
cmd = [
"torchrun",
"--standalone",
"--nnodes=1",
f"--nproc_per_node={len(gpus)}",
"-m",
"monai.bundle",
"run",
run_id, # run_id, user can pass the arg
"--meta_file",
self.bundle_metadata_path,
"--config_file",
f"['{train_path}','{multi_gpu_train_path}']",
"--logging_file",
logging_file,
]
if tracking:
cmd.extend(["--tracking", tracking])
if tracking_uri:
cmd.extend(["--tracking_uri", tracking_uri])
self.run_multi_gpu(request, cmd, env)
else:
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
self.run_single_gpu(request, overrides)
sys.path.remove(self.bundle_path)
logger.info("Training Finished....")
return {}
def run_single_gpu(self, request, overrides):
run_id = request.get("run_id", "run")
monai.bundle.run(
run_id=run_id,
init_id=None,
final_id=None,
meta_file=self.bundle_metadata_path,
config_file=self.bundle_config_path,
**overrides,
)
def run_multi_gpu(self, request, cmd, env):
self._run_command(cmd, env)
def _run_command(self, cmd, env):
logger.info(f"RUNNING COMMAND:: {cmd}")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, env=env)
while process.poll() is None:
line = process.stdout.readline()
line = line.rstrip()
if line:
print(line, flush=True)
logger.info(f"Return code: {process.returncode}")
process.stdout.close()
def _load_bundle_config(self, path, config):
bundle_config = ConfigParser()
bundle_config.read_config(config)
bundle_config.config.update({self.const.key_bundle_root(): path}) # type: ignore
return bundle_config
def _update_overrides(self, overrides):
return overrides | if not config_paths:
logger.warning(
f"Ignore Multi-GPU Training; No multi-gpu train config {self.const.multi_gpu_configs()} exists" | random_line_split |
bundle.py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import subprocess
import sys
from typing import Dict, Optional, Sequence
import monai.bundle
import torch
from monai.bundle import ConfigParser
from monai.data import partition_dataset
from monai.handlers import CheckpointLoader
from monailabel.config import settings
from monailabel.interfaces.datastore import Datastore
from monailabel.interfaces.tasks.train import TrainTask
from monailabel.utils.others.class_utils import unload_module
from monailabel.utils.others.generic import device_list, name_to_device
logger = logging.getLogger(__name__)
class BundleConstants:
def configs(self) -> Sequence[str]:
return ["train.json", "train.yaml"]
def multi_gpu_configs(self) -> Sequence[str]:
return ["multi_gpu_train.json", "multi_gpu_train.yaml"]
def metadata_json(self) -> str:
return "metadata.json"
def model_pytorch(self) -> str:
return "model.pt"
def key_device(self) -> str:
return "device"
def key_bundle_root(self) -> str:
return "bundle_root"
def key_network(self) -> str:
return "network"
def key_network_def(self) -> str:
return "network_def"
def key_train_trainer_max_epochs(self) -> str:
return "train#trainer#max_epochs"
def key_train_dataset_data(self) -> str:
return "train#dataset#data"
def key_train_handlers(self) -> str:
return "train#handlers"
def key_validate_dataset_data(self) -> str:
|
def key_tracking(self) -> str:
return "tracking"
def key_tracking_uri(self) -> str:
return "tracking_uri"
def key_experiment_name(self) -> str:
return "experiment_name"
def key_run_name(self) -> str:
return "run_name"
def key_displayable_configs(self) -> Sequence[str]:
return ["displayable_configs"]
class BundleTrainTask(TrainTask):
def __init__(
self,
path: str,
conf: Dict[str, str],
const: Optional[BundleConstants] = None,
enable_tracking=False,
model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param optiom to train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only support detection, classification label do not suppot in bundle yet,
# 0 is used for all positive boxes, wait for sync.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, instantiate=True)
overrides[k] = {c: request[c] for c in displayable_configs.keys()}
if tracking and tracking.lower() != "none":
overrides[self.const.key_tracking()] = tracking
if tracking_uri:
overrides[self.const.key_tracking_uri()] = tracking_uri
if tracking_experiment_name:
overrides[self.const.key_experiment_name()] = tracking_experiment_name
if tracking_run_name:
overrides[self.const.key_run_name()] = tracking_run_name
# external validation datalist supported through bundle itself (pass -1 in the request to use the same)
if val_ds is not None:
overrides[self.const.key_validate_dataset_data()] = val_ds
# allow derived class to update further overrides
self._update_overrides(overrides)
if multi_gpu:
config_paths = [
c
for c in self.const.multi_gpu_configs()
if os.path.exists(os.path.join(self.bundle_path, "configs", c))
]
if not config_paths:
logger.warning(
f"Ignore Multi-GPU Training; No multi-gpu train config {self.const.multi_gpu_configs()} exists"
)
return
train_path = os.path.join(self.bundle_path, "configs", "monailabel_train.json")
multi_gpu_train_path = os.path.join(self.bundle_path, "configs", config_paths[0])
logging_file = os.path.join(self.bundle_path, "configs", "logging.conf")
for k, v in overrides.items():
self.bundle_config.set(v, k)
ConfigParser.export_config_file(self.bundle_config.config, train_path, indent=2) # type: ignore
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = ",".join([str(g) for g in gpus])
logger.info(f"Using CUDA_VISIBLE_DEVICES: {env['CUDA_VISIBLE_DEVICES']}")
cmd = [
"torchrun",
"--standalone",
"--nnodes=1",
f"--nproc_per_node={len(gpus)}",
"-m",
"monai.bundle",
"run",
run_id, # run_id, user can pass the arg
"--meta_file",
self.bundle_metadata_path,
"--config_file",
f"['{train_path}','{multi_gpu_train_path}']",
"--logging_file",
logging_file,
]
if tracking:
cmd.extend(["--tracking", tracking])
if tracking_uri:
cmd.extend(["--tracking_uri", tracking_uri])
self.run_multi_gpu(request, cmd, env)
else:
sys.path.insert(0, self.bundle_path)
unload_module("scripts")
self.run_single_gpu(request, overrides)
sys.path.remove(self.bundle_path)
logger.info("Training Finished....")
return {}
def run_single_gpu(self, request, overrides):
run_id = request.get("run_id", "run")
monai.bundle.run(
run_id=run_id,
init_id=None,
final_id=None,
meta_file=self.bundle_metadata_path,
config_file=self.bundle_config_path,
**overrides,
)
def run_multi_gpu(self, request, cmd, env):
self._run_command(cmd, env)
def _run_command(self, cmd, env):
logger.info(f"RUNNING COMMAND:: {cmd}")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, env=env)
while process.poll() is None:
line = process.stdout.readline()
line = line.rstrip()
if line:
print(line, flush=True)
logger.info(f"Return code: {process.returncode}")
process.stdout.close()
def _load_bundle_config(self, path, config):
bundle_config = ConfigParser()
bundle_config.read_config(config)
bundle_config.config.update({self.const.key_bundle_root(): path}) # type: ignore
return bundle_config
def _update_overrides(self, overrides):
return overrides
| return "validate#dataset#data" | identifier_body |
__init__.py | import functools
import random
import re
import string
from enum import Enum
import PIL
import asyncio
import cachetools
import pkg_resources
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from plumeria import config
from plumeria.command import CommandError, commands, channel_only
from plumeria.command.parse import Word
from plumeria.config import percent
from plumeria.config.common import games_allowed_only
from plumeria.core.scoped_config import scoped_config
from plumeria.message import ImageAttachment, Response
from plumeria.message.lists import parse_list
from plumeria.perms import owners_only
bomb_chance = config.create("minesweeper", "bomb_chance", type=percent, fallback=20, scoped=True, private=False,
comment="The % of a cell being a bomb")
POS_RE = re.compile("^([A-Za-z]+)([0-9]+)$")
def cell_name(x, y):
return string.ascii_uppercase[x] + str(y + 1)
def draw_centered_text(draw, x, y, text, *args, font, **kwargs):
w, h = draw.textsize(text, font=font)
draw.text((x - w / 2, y - h / 2), text, *args, font=font, **kwargs)
def load_tile_graphics():
images = {}
for play in Play:
with pkg_resources.resource_stream(__name__, "assets/{}.png".format(play.name.lower())) as f:
images[play] = Image.open(f).convert('RGBA')
return images
class Play(Enum):
UNKNOWN = 'unknown'
CLEAR = 'clear'
FLAGGED = 'flagged'
EXPLODED = 'exploded'
class State(Enum):
IN_PLAY = 'in_play'
WON = 'won'
LOST = 'lost'
UNKNOWN_OR_FLAGGED = {Play.UNKNOWN, Play.FLAGGED}
TILE_GRAPHICS = load_tile_graphics()
class Game:
def __init__(self, w, h, mine_fraction, r=None):
r = random or random.Random()
self.width = w
self.height = h
self.bomb_map = list(map(lambda y: list(map(lambda x: r.random() <= mine_fraction, range(w))), range(h)))
self.play = list(map(lambda y: list(map(lambda x: Play.UNKNOWN, range(w))), range(h)))
self.state = State.IN_PLAY
self.remaining_unknown = w * h
self.bomb_count = 0
self.cell_size = 25
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.cell_font = ImageFont.truetype(f, 10)
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.count_font = ImageFont.truetype(f, 15)
# count bombs
for x in range(w):
for y in range(h):
if self.bomb_map[x][y]:
self.bomb_count += 1
self.remaining_unknown -= 1
if self.bomb_count == 0:
raise CommandError("No bombs found in created game! Make sure the bomb "
"`minesweeper/bomb_chance` setting is not near 0%.")
# start it off
tries = 0
while self.remaining_unknown > 0 and tries < 20:
x = r.randrange(0, self.width)
y = r.randrange(0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else:
self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
self.state = State.WON
else:
raise CommandError("You can't click that cell!")
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def start(message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.click(*game.parse_pos(position))
if game.state == State.WON:
del cache[key]
return Response("\N{TROPHY} \N{TROPHY} YOU ARE WINNER! \N{TROPHY} \N{TROPHY}", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
elif game.state == State.LOST:
del cache[key]
return Response("\N{BOMB} \N{COLLISION SYMBOL} \N{COLLISION SYMBOL} BOOOOM!!!", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
else:
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper flag", "mine flag", "m flag", category="Games")
@channel_only
@games_allowed_only
async def flag(message):
"""
Toggle flags on one or more cells on minesweeper.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.toggle_flag(*game.parse_pos(position))
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper cheat", "mine cheat", category="Games", params=[])
@channel_only
@owners_only
async def cheat(message):
"""
Bot administrator command to show where bombs are for testing.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
return Response("", attachments=[ImageAttachment(await game.create_image_async(cheat=True), "minesweeper.png")])
def setup():
| config.add(bomb_chance)
commands.add(start)
commands.add(click)
commands.add(flag)
commands.add(cheat) | identifier_body | |
__init__.py | import functools
import random
import re
import string
from enum import Enum
import PIL
import asyncio
import cachetools
import pkg_resources
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from plumeria import config
from plumeria.command import CommandError, commands, channel_only
from plumeria.command.parse import Word
from plumeria.config import percent
from plumeria.config.common import games_allowed_only
from plumeria.core.scoped_config import scoped_config
from plumeria.message import ImageAttachment, Response
from plumeria.message.lists import parse_list
from plumeria.perms import owners_only
bomb_chance = config.create("minesweeper", "bomb_chance", type=percent, fallback=20, scoped=True, private=False,
comment="The % of a cell being a bomb")
POS_RE = re.compile("^([A-Za-z]+)([0-9]+)$")
def cell_name(x, y):
return string.ascii_uppercase[x] + str(y + 1)
def draw_centered_text(draw, x, y, text, *args, font, **kwargs):
w, h = draw.textsize(text, font=font)
draw.text((x - w / 2, y - h / 2), text, *args, font=font, **kwargs)
def load_tile_graphics():
images = {}
for play in Play:
with pkg_resources.resource_stream(__name__, "assets/{}.png".format(play.name.lower())) as f:
images[play] = Image.open(f).convert('RGBA')
return images
class Play(Enum):
UNKNOWN = 'unknown'
CLEAR = 'clear'
FLAGGED = 'flagged'
EXPLODED = 'exploded'
class State(Enum):
IN_PLAY = 'in_play'
WON = 'won'
LOST = 'lost'
UNKNOWN_OR_FLAGGED = {Play.UNKNOWN, Play.FLAGGED}
TILE_GRAPHICS = load_tile_graphics()
class Game:
def __init__(self, w, h, mine_fraction, r=None):
r = random or random.Random()
self.width = w
self.height = h
self.bomb_map = list(map(lambda y: list(map(lambda x: r.random() <= mine_fraction, range(w))), range(h)))
self.play = list(map(lambda y: list(map(lambda x: Play.UNKNOWN, range(w))), range(h)))
self.state = State.IN_PLAY
self.remaining_unknown = w * h
self.bomb_count = 0
self.cell_size = 25
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.cell_font = ImageFont.truetype(f, 10)
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.count_font = ImageFont.truetype(f, 15)
# count bombs
for x in range(w):
for y in range(h):
if self.bomb_map[x][y]:
self.bomb_count += 1
self.remaining_unknown -= 1
if self.bomb_count == 0:
raise CommandError("No bombs found in created game! Make sure the bomb "
"`minesweeper/bomb_chance` setting is not near 0%.")
# start it off
tries = 0
while self.remaining_unknown > 0 and tries < 20:
x = r.randrange(0, self.width)
y = r.randrange(0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else:
self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
|
else:
raise CommandError("You can't click that cell!")
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def start(message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.click(*game.parse_pos(position))
if game.state == State.WON:
del cache[key]
return Response("\N{TROPHY} \N{TROPHY} YOU ARE WINNER! \N{TROPHY} \N{TROPHY}", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
elif game.state == State.LOST:
del cache[key]
return Response("\N{BOMB} \N{COLLISION SYMBOL} \N{COLLISION SYMBOL} BOOOOM!!!", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
else:
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper flag", "mine flag", "m flag", category="Games")
@channel_only
@games_allowed_only
async def flag(message):
"""
Toggle flags on one or more cells on minesweeper.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.toggle_flag(*game.parse_pos(position))
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper cheat", "mine cheat", category="Games", params=[])
@channel_only
@owners_only
async def cheat(message):
"""
Bot administrator command to show where bombs are for testing.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
return Response("", attachments=[ImageAttachment(await game.create_image_async(cheat=True), "minesweeper.png")])
def setup():
config.add(bomb_chance)
commands.add(start)
commands.add(click)
commands.add(flag)
commands.add(cheat)
| self.state = State.WON | conditional_block |
__init__.py | import functools
import random
import re
import string
from enum import Enum
import PIL
import asyncio
import cachetools
import pkg_resources
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from plumeria import config
from plumeria.command import CommandError, commands, channel_only
from plumeria.command.parse import Word
from plumeria.config import percent
from plumeria.config.common import games_allowed_only
from plumeria.core.scoped_config import scoped_config
from plumeria.message import ImageAttachment, Response
from plumeria.message.lists import parse_list
from plumeria.perms import owners_only
bomb_chance = config.create("minesweeper", "bomb_chance", type=percent, fallback=20, scoped=True, private=False,
comment="The % of a cell being a bomb")
POS_RE = re.compile("^([A-Za-z]+)([0-9]+)$")
def cell_name(x, y):
return string.ascii_uppercase[x] + str(y + 1)
def draw_centered_text(draw, x, y, text, *args, font, **kwargs):
w, h = draw.textsize(text, font=font)
draw.text((x - w / 2, y - h / 2), text, *args, font=font, **kwargs)
def load_tile_graphics():
images = {}
for play in Play:
with pkg_resources.resource_stream(__name__, "assets/{}.png".format(play.name.lower())) as f:
images[play] = Image.open(f).convert('RGBA')
return images
class Play(Enum):
UNKNOWN = 'unknown'
CLEAR = 'clear'
FLAGGED = 'flagged'
EXPLODED = 'exploded'
class State(Enum):
IN_PLAY = 'in_play'
WON = 'won'
LOST = 'lost'
UNKNOWN_OR_FLAGGED = {Play.UNKNOWN, Play.FLAGGED}
TILE_GRAPHICS = load_tile_graphics()
class Game:
def __init__(self, w, h, mine_fraction, r=None):
r = random or random.Random()
self.width = w
self.height = h
self.bomb_map = list(map(lambda y: list(map(lambda x: r.random() <= mine_fraction, range(w))), range(h)))
self.play = list(map(lambda y: list(map(lambda x: Play.UNKNOWN, range(w))), range(h)))
self.state = State.IN_PLAY
self.remaining_unknown = w * h
self.bomb_count = 0
self.cell_size = 25
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.cell_font = ImageFont.truetype(f, 10)
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.count_font = ImageFont.truetype(f, 15)
# count bombs
for x in range(w):
for y in range(h):
if self.bomb_map[x][y]:
self.bomb_count += 1
self.remaining_unknown -= 1
if self.bomb_count == 0:
raise CommandError("No bombs found in created game! Make sure the bomb "
"`minesweeper/bomb_chance` setting is not near 0%.")
# start it off
tries = 0
while self.remaining_unknown > 0 and tries < 20:
x = r.randrange(0, self.width)
y = r.randrange(0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else:
self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
self.state = State.WON
else:
raise CommandError("You can't click that cell!")
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def | (message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.click(*game.parse_pos(position))
if game.state == State.WON:
del cache[key]
return Response("\N{TROPHY} \N{TROPHY} YOU ARE WINNER! \N{TROPHY} \N{TROPHY}", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
elif game.state == State.LOST:
del cache[key]
return Response("\N{BOMB} \N{COLLISION SYMBOL} \N{COLLISION SYMBOL} BOOOOM!!!", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
else:
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper flag", "mine flag", "m flag", category="Games")
@channel_only
@games_allowed_only
async def flag(message):
"""
Toggle flags on one or more cells on minesweeper.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.toggle_flag(*game.parse_pos(position))
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper cheat", "mine cheat", category="Games", params=[])
@channel_only
@owners_only
async def cheat(message):
"""
Bot administrator command to show where bombs are for testing.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
return Response("", attachments=[ImageAttachment(await game.create_image_async(cheat=True), "minesweeper.png")])
def setup():
config.add(bomb_chance)
commands.add(start)
commands.add(click)
commands.add(flag)
commands.add(cheat)
| start | identifier_name |
__init__.py | import functools
import random
import re
import string
from enum import Enum
import PIL
import asyncio
import cachetools
import pkg_resources
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from plumeria import config
from plumeria.command import CommandError, commands, channel_only
from plumeria.command.parse import Word
from plumeria.config import percent
from plumeria.config.common import games_allowed_only
from plumeria.core.scoped_config import scoped_config
from plumeria.message import ImageAttachment, Response
from plumeria.message.lists import parse_list
from plumeria.perms import owners_only
bomb_chance = config.create("minesweeper", "bomb_chance", type=percent, fallback=20, scoped=True, private=False,
comment="The % of a cell being a bomb")
POS_RE = re.compile("^([A-Za-z]+)([0-9]+)$")
def cell_name(x, y):
return string.ascii_uppercase[x] + str(y + 1)
def draw_centered_text(draw, x, y, text, *args, font, **kwargs):
w, h = draw.textsize(text, font=font)
draw.text((x - w / 2, y - h / 2), text, *args, font=font, **kwargs)
def load_tile_graphics():
images = {}
for play in Play:
with pkg_resources.resource_stream(__name__, "assets/{}.png".format(play.name.lower())) as f:
images[play] = Image.open(f).convert('RGBA')
return images
class Play(Enum):
UNKNOWN = 'unknown'
CLEAR = 'clear'
FLAGGED = 'flagged'
EXPLODED = 'exploded'
class State(Enum):
IN_PLAY = 'in_play'
WON = 'won'
LOST = 'lost'
UNKNOWN_OR_FLAGGED = {Play.UNKNOWN, Play.FLAGGED}
TILE_GRAPHICS = load_tile_graphics()
class Game:
def __init__(self, w, h, mine_fraction, r=None):
r = random or random.Random()
self.width = w
self.height = h
self.bomb_map = list(map(lambda y: list(map(lambda x: r.random() <= mine_fraction, range(w))), range(h)))
self.play = list(map(lambda y: list(map(lambda x: Play.UNKNOWN, range(w))), range(h)))
self.state = State.IN_PLAY
self.remaining_unknown = w * h
self.bomb_count = 0
self.cell_size = 25
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.cell_font = ImageFont.truetype(f, 10)
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.count_font = ImageFont.truetype(f, 15)
# count bombs
for x in range(w):
for y in range(h):
if self.bomb_map[x][y]:
self.bomb_count += 1
self.remaining_unknown -= 1
if self.bomb_count == 0:
raise CommandError("No bombs found in created game! Make sure the bomb "
"`minesweeper/bomb_chance` setting is not near 0%.")
# start it off
tries = 0
while self.remaining_unknown > 0 and tries < 20:
x = r.randrange(0, self.width)
y = r.randrange(0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else: |
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def start(message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.click(*game.parse_pos(position))
if game.state == State.WON:
del cache[key]
return Response("\N{TROPHY} \N{TROPHY} YOU ARE WINNER! \N{TROPHY} \N{TROPHY}", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
elif game.state == State.LOST:
del cache[key]
return Response("\N{BOMB} \N{COLLISION SYMBOL} \N{COLLISION SYMBOL} BOOOOM!!!", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
else:
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper flag", "mine flag", "m flag", category="Games")
@channel_only
@games_allowed_only
async def flag(message):
"""
Toggle flags on one or more cells on minesweeper.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.toggle_flag(*game.parse_pos(position))
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper cheat", "mine cheat", category="Games", params=[])
@channel_only
@owners_only
async def cheat(message):
"""
Bot administrator command to show where bombs are for testing.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
return Response("", attachments=[ImageAttachment(await game.create_image_async(cheat=True), "minesweeper.png")])
def setup():
config.add(bomb_chance)
commands.add(start)
commands.add(click)
commands.add(flag)
commands.add(cheat) | self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
self.state = State.WON
else:
raise CommandError("You can't click that cell!") | random_line_split |
lsh.py | """
lsh.py
Algorithms based on 'Mining of Massive Datasets'
"""
from unionfind import UnionFind
from collections import defaultdict
from collections import defaultdict, namedtuple
from copy import deepcopy
import operator
def shingle(s, k):
"""Generate k-length shingles of string s"""
k = min(len(s), k)
for i in range(len(s) - k + 1):
yield s[i:i+k]
def hshingle(s, k):
"""Generate k-length shingles then hash"""
for s in shingle(s, k):
|
def jaccard_sim(X, Y):
"""Jaccard similarity between two sets"""
x = set(X)
y = set(Y)
return float(len(x & y)) / len(x | y)
def jaccard_dist(X, Y):
"""Jaccard distance between two sets"""
return 1 - jaccard_sim(X, Y)
class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
class Cluster(object):
"""Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets()
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are object of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "contraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
multiprocessing; explanations and documentation soon to come..
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
if sigmaps_to_merge is None:
self.sigmap = {}
else:
self.sigmap = dict(reduce(operator.__add__,
[sm.items() for sm in sigmaps_to_merge]))
def sign(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
self.sigmap[label] = (self.signer.sign(s) if s else None,
obj if obj else s)
def find_clusters(self):
for label, (sig, obj) in self.sigmap.iteritems():
self.unionfind[label]
if sig is None: continue
lo = ConstrainedCluster.LabelObj(label, obj)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
if __name__ == '__main__':
n = 2
sa = set(shingle("1234abcdef", n))
sb = set(shingle("4321abcdef", n))
print 'Jaccard Sim:', jaccard_sim(sa, sb)
cluster = Cluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'Cluster:', cluster.get_sets() # [['a', 'b']]
cluster = ConstrainedCluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'ConstrainedCluster:', cluster.get_sets() # [['a'], ['b']]
| yield hash(s) | conditional_block |
lsh.py | """
lsh.py
Algorithms based on 'Mining of Massive Datasets'
"""
from unionfind import UnionFind
from collections import defaultdict
from collections import defaultdict, namedtuple
from copy import deepcopy
import operator
def | (s, k):
"""Generate k-length shingles of string s"""
k = min(len(s), k)
for i in range(len(s) - k + 1):
yield s[i:i+k]
def hshingle(s, k):
"""Generate k-length shingles then hash"""
for s in shingle(s, k):
yield hash(s)
def jaccard_sim(X, Y):
"""Jaccard similarity between two sets"""
x = set(X)
y = set(Y)
return float(len(x & y)) / len(x | y)
def jaccard_dist(X, Y):
"""Jaccard distance between two sets"""
return 1 - jaccard_sim(X, Y)
class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
class Cluster(object):
"""Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets()
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are object of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "contraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
multiprocessing; explanations and documentation soon to come..
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
if sigmaps_to_merge is None:
self.sigmap = {}
else:
self.sigmap = dict(reduce(operator.__add__,
[sm.items() for sm in sigmaps_to_merge]))
def sign(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
self.sigmap[label] = (self.signer.sign(s) if s else None,
obj if obj else s)
def find_clusters(self):
for label, (sig, obj) in self.sigmap.iteritems():
self.unionfind[label]
if sig is None: continue
lo = ConstrainedCluster.LabelObj(label, obj)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
if __name__ == '__main__':
n = 2
sa = set(shingle("1234abcdef", n))
sb = set(shingle("4321abcdef", n))
print 'Jaccard Sim:', jaccard_sim(sa, sb)
cluster = Cluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'Cluster:', cluster.get_sets() # [['a', 'b']]
cluster = ConstrainedCluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'ConstrainedCluster:', cluster.get_sets() # [['a'], ['b']]
| shingle | identifier_name |
lsh.py | """
lsh.py
Algorithms based on 'Mining of Massive Datasets'
"""
from unionfind import UnionFind
from collections import defaultdict
from collections import defaultdict, namedtuple
from copy import deepcopy
import operator
def shingle(s, k):
"""Generate k-length shingles of string s"""
k = min(len(s), k)
for i in range(len(s) - k + 1):
yield s[i:i+k]
def hshingle(s, k):
"""Generate k-length shingles then hash"""
for s in shingle(s, k):
yield hash(s)
def jaccard_sim(X, Y):
"""Jaccard similarity between two sets"""
x = set(X)
y = set(Y)
return float(len(x & y)) / len(x | y)
def jaccard_dist(X, Y):
"""Jaccard distance between two sets"""
return 1 - jaccard_sim(X, Y) | class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
class Cluster(object):
"""Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets()
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are object of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "contraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
multiprocessing; explanations and documentation soon to come..
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
if sigmaps_to_merge is None:
self.sigmap = {}
else:
self.sigmap = dict(reduce(operator.__add__,
[sm.items() for sm in sigmaps_to_merge]))
def sign(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
self.sigmap[label] = (self.signer.sign(s) if s else None,
obj if obj else s)
def find_clusters(self):
for label, (sig, obj) in self.sigmap.iteritems():
self.unionfind[label]
if sig is None: continue
lo = ConstrainedCluster.LabelObj(label, obj)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
if __name__ == '__main__':
n = 2
sa = set(shingle("1234abcdef", n))
sb = set(shingle("4321abcdef", n))
print 'Jaccard Sim:', jaccard_sim(sa, sb)
cluster = Cluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'Cluster:', cluster.get_sets() # [['a', 'b']]
cluster = ConstrainedCluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'ConstrainedCluster:', cluster.get_sets() # [['a'], ['b']] | random_line_split | |
lsh.py | """
lsh.py
Algorithms based on 'Mining of Massive Datasets'
"""
from unionfind import UnionFind
from collections import defaultdict
from collections import defaultdict, namedtuple
from copy import deepcopy
import operator
def shingle(s, k):
"""Generate k-length shingles of string s"""
k = min(len(s), k)
for i in range(len(s) - k + 1):
yield s[i:i+k]
def hshingle(s, k):
"""Generate k-length shingles then hash"""
for s in shingle(s, k):
yield hash(s)
def jaccard_sim(X, Y):
"""Jaccard similarity between two sets"""
x = set(X)
y = set(Y)
return float(len(x & y)) / len(x | y)
def jaccard_dist(X, Y):
"""Jaccard distance between two sets"""
return 1 - jaccard_sim(X, Y)
class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
class Cluster(object):
|
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are object of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "contraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
multiprocessing; explanations and documentation soon to come..
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelSetObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
if sigmaps_to_merge is None:
self.sigmap = {}
else:
self.sigmap = dict(reduce(operator.__add__,
[sm.items() for sm in sigmaps_to_merge]))
def sign(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
self.sigmap[label] = (self.signer.sign(s) if s else None,
obj if obj else s)
def find_clusters(self):
for label, (sig, obj) in self.sigmap.iteritems():
self.unionfind[label]
if sig is None: continue
lo = ConstrainedCluster.LabelObj(label, obj)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
if __name__ == '__main__':
n = 2
sa = set(shingle("1234abcdef", n))
sb = set(shingle("4321abcdef", n))
print 'Jaccard Sim:', jaccard_sim(sa, sb)
cluster = Cluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'Cluster:', cluster.get_sets() # [['a', 'b']]
cluster = ConstrainedCluster()
cluster.add_set(sa, 'a')
cluster.add_set(sb, 'b')
print 'ConstrainedCluster:', cluster.get_sets() # [['a'], ['b']]
| """Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets() | identifier_body |
denoising_autoencoder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Learns features of inputs.
Principal Author: Matthew Alger
"""
from __future__ import division
import time
import numpy
import theano
from theano.tensor.shared_randomstreams import RandomStreams
class Denoising_Autoencoder(object):
"""
It like learns how to take noisy versions of the input to unnoisy versions
of the input like back to the original versions of the input. -- Buck
"""
def __init__(self
, input_dimension
, hidden_dimension
, output_dimension
, input_batch=None
, output_batch=None
, symbolic_input=None
, rng=None
, theano_rng=None
, learning_rate=0.1
, corruption=0.3):
"""
input_dimension: The dimension of the input vectors.
hidden_dimension: How many hidden nodes to map to.
input_batch: Optional. Input data.
output_batch: Optional. A vector of labels corresponding to each input vector.
output_dimension: How many labels there are.
symbolic_input: Optional. A symbolic input value.
rng: Optional. A NumPy RandomState.
theano_rng: Optional. A Theano RandomStream.
learning_rate: Optional. How large gradient descent jumps are.
corruption: Optional. How much to corrupt the input when learning.
"""
self.input_dimension = input_dimension
self.hidden_dimension = hidden_dimension
self.output_batch = output_batch
self.output_dimension = output_dimension
if symbolic_input is None:
self.initialise_symbolic_input()
else:
self.symbolic_input = symbolic_input
self.initialise_symbolic_output()
if rng is None:
self.initialise_rng()
else:
self.rng = rng
if theano_rng is None:
self.initialise_theano_rng()
else:
self.theano_rng = theano_rng
self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
"""
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights -
self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
|
return self.weights.get_value(borrow=True)
def train_model(self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
yield (index, cost)
if not yield_every_iteration:
yield (epoch, numpy.mean(costs))
def test_DA(DA, epochs=15):
from sys import argv
import lib.mnist as mnist
print "loading training images"
images = mnist.load_training_images(format="theano", validation=False, div=256.0)
labels = mnist.load_training_labels(format="theano", validation=False)
print "loading test images"
validation_images = mnist.load_training_images(format="numpy", validation=True)
validation_labels = mnist.load_training_labels(format="numpy", validation=True)
print "instantiating denoising autoencoder"
corruption = 0.3
learning_rate = 0.1
hiddens = 500
da = DA(784, hiddens,
input_batch=images,
output_batch=labels,
output_dimension=10,
corruption=corruption,
learning_rate=learning_rate)
print "training..."
for epoch, cost in da.train_model(epochs):
print epoch, cost
print "wrong {:.02%} of the time".format(
float(da.validate_model(validation_images, validation_labels)))
print "done."
import PIL
import lib.dlt_utils as utils
import random
image = PIL.Image.fromarray(utils.tile_raster_images(
X=da.weights.get_value(borrow=True).T,
img_shape=(28, 28), tile_shape=(50, 10),
tile_spacing=(1, 1)))
image.save('../plots/{:010x}_{}_{}_{}_{}_{}.png'.format(
random.randrange(16**10), argv[0].replace("/", "-"), corruption, learning_rate, epochs, hiddens))
if __name__ == '__main__':
test_DA(Denoising_Autoencoder, 10) | def get_weight_matrix(self):
"""
Get the weight matrix.
""" | random_line_split |
denoising_autoencoder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Learns features of inputs.
Principal Author: Matthew Alger
"""
from __future__ import division
import time
import numpy
import theano
from theano.tensor.shared_randomstreams import RandomStreams
class Denoising_Autoencoder(object):
"""
It like learns how to take noisy versions of the input to unnoisy versions
of the input like back to the original versions of the input. -- Buck
"""
def __init__(self
, input_dimension
, hidden_dimension
, output_dimension
, input_batch=None
, output_batch=None
, symbolic_input=None
, rng=None
, theano_rng=None
, learning_rate=0.1
, corruption=0.3):
"""
input_dimension: The dimension of the input vectors.
hidden_dimension: How many hidden nodes to map to.
input_batch: Optional. Input data.
output_batch: Optional. A vector of labels corresponding to each input vector.
output_dimension: How many labels there are.
symbolic_input: Optional. A symbolic input value.
rng: Optional. A NumPy RandomState.
theano_rng: Optional. A Theano RandomStream.
learning_rate: Optional. How large gradient descent jumps are.
corruption: Optional. How much to corrupt the input when learning.
"""
self.input_dimension = input_dimension
self.hidden_dimension = hidden_dimension
self.output_batch = output_batch
self.output_dimension = output_dimension
if symbolic_input is None:
self.initialise_symbolic_input()
else:
self.symbolic_input = symbolic_input
self.initialise_symbolic_output()
if rng is None:
self.initialise_rng()
else:
self.rng = rng
if theano_rng is None:
self.initialise_theano_rng()
else:
self.theano_rng = theano_rng
self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
"""
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights -
self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
def get_weight_matrix(self):
"""
Get the weight matrix.
"""
return self.weights.get_value(borrow=True)
def | (self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
yield (index, cost)
if not yield_every_iteration:
yield (epoch, numpy.mean(costs))
def test_DA(DA, epochs=15):
from sys import argv
import lib.mnist as mnist
print "loading training images"
images = mnist.load_training_images(format="theano", validation=False, div=256.0)
labels = mnist.load_training_labels(format="theano", validation=False)
print "loading test images"
validation_images = mnist.load_training_images(format="numpy", validation=True)
validation_labels = mnist.load_training_labels(format="numpy", validation=True)
print "instantiating denoising autoencoder"
corruption = 0.3
learning_rate = 0.1
hiddens = 500
da = DA(784, hiddens,
input_batch=images,
output_batch=labels,
output_dimension=10,
corruption=corruption,
learning_rate=learning_rate)
print "training..."
for epoch, cost in da.train_model(epochs):
print epoch, cost
print "wrong {:.02%} of the time".format(
float(da.validate_model(validation_images, validation_labels)))
print "done."
import PIL
import lib.dlt_utils as utils
import random
image = PIL.Image.fromarray(utils.tile_raster_images(
X=da.weights.get_value(borrow=True).T,
img_shape=(28, 28), tile_shape=(50, 10),
tile_spacing=(1, 1)))
image.save('../plots/{:010x}_{}_{}_{}_{}_{}.png'.format(
random.randrange(16**10), argv[0].replace("/", "-"), corruption, learning_rate, epochs, hiddens))
if __name__ == '__main__':
test_DA(Denoising_Autoencoder, 10)
| train_model | identifier_name |
denoising_autoencoder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Learns features of inputs.
Principal Author: Matthew Alger
"""
from __future__ import division
import time
import numpy
import theano
from theano.tensor.shared_randomstreams import RandomStreams
class Denoising_Autoencoder(object):
"""
It like learns how to take noisy versions of the input to unnoisy versions
of the input like back to the original versions of the input. -- Buck
"""
def __init__(self
, input_dimension
, hidden_dimension
, output_dimension
, input_batch=None
, output_batch=None
, symbolic_input=None
, rng=None
, theano_rng=None
, learning_rate=0.1
, corruption=0.3):
"""
input_dimension: The dimension of the input vectors.
hidden_dimension: How many hidden nodes to map to.
input_batch: Optional. Input data.
output_batch: Optional. A vector of labels corresponding to each input vector.
output_dimension: How many labels there are.
symbolic_input: Optional. A symbolic input value.
rng: Optional. A NumPy RandomState.
theano_rng: Optional. A Theano RandomStream.
learning_rate: Optional. How large gradient descent jumps are.
corruption: Optional. How much to corrupt the input when learning.
"""
self.input_dimension = input_dimension
self.hidden_dimension = hidden_dimension
self.output_batch = output_batch
self.output_dimension = output_dimension
if symbolic_input is None:
self.initialise_symbolic_input()
else:
self.symbolic_input = symbolic_input
self.initialise_symbolic_output()
if rng is None:
self.initialise_rng()
else:
self.rng = rng
if theano_rng is None:
self.initialise_theano_rng()
else:
self.theano_rng = theano_rng
self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
|
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
def get_weight_matrix(self):
"""
Get the weight matrix.
"""
return self.weights.get_value(borrow=True)
def train_model(self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
yield (index, cost)
if not yield_every_iteration:
yield (epoch, numpy.mean(costs))
def test_DA(DA, epochs=15):
from sys import argv
import lib.mnist as mnist
print "loading training images"
images = mnist.load_training_images(format="theano", validation=False, div=256.0)
labels = mnist.load_training_labels(format="theano", validation=False)
print "loading test images"
validation_images = mnist.load_training_images(format="numpy", validation=True)
validation_labels = mnist.load_training_labels(format="numpy", validation=True)
print "instantiating denoising autoencoder"
corruption = 0.3
learning_rate = 0.1
hiddens = 500
da = DA(784, hiddens,
input_batch=images,
output_batch=labels,
output_dimension=10,
corruption=corruption,
learning_rate=learning_rate)
print "training..."
for epoch, cost in da.train_model(epochs):
print epoch, cost
print "wrong {:.02%} of the time".format(
float(da.validate_model(validation_images, validation_labels)))
print "done."
import PIL
import lib.dlt_utils as utils
import random
image = PIL.Image.fromarray(utils.tile_raster_images(
X=da.weights.get_value(borrow=True).T,
img_shape=(28, 28), tile_shape=(50, 10),
tile_spacing=(1, 1)))
image.save('../plots/{:010x}_{}_{}_{}_{}_{}.png'.format(
random.randrange(16**10), argv[0].replace("/", "-"), corruption, learning_rate, epochs, hiddens))
if __name__ == '__main__':
test_DA(Denoising_Autoencoder, 10)
| """
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights -
self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates | identifier_body |
denoising_autoencoder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Learns features of inputs.
Principal Author: Matthew Alger
"""
from __future__ import division
import time
import numpy
import theano
from theano.tensor.shared_randomstreams import RandomStreams
class Denoising_Autoencoder(object):
"""
It like learns how to take noisy versions of the input to unnoisy versions
of the input like back to the original versions of the input. -- Buck
"""
def __init__(self
, input_dimension
, hidden_dimension
, output_dimension
, input_batch=None
, output_batch=None
, symbolic_input=None
, rng=None
, theano_rng=None
, learning_rate=0.1
, corruption=0.3):
"""
input_dimension: The dimension of the input vectors.
hidden_dimension: How many hidden nodes to map to.
input_batch: Optional. Input data.
output_batch: Optional. A vector of labels corresponding to each input vector.
output_dimension: How many labels there are.
symbolic_input: Optional. A symbolic input value.
rng: Optional. A NumPy RandomState.
theano_rng: Optional. A Theano RandomStream.
learning_rate: Optional. How large gradient descent jumps are.
corruption: Optional. How much to corrupt the input when learning.
"""
self.input_dimension = input_dimension
self.hidden_dimension = hidden_dimension
self.output_batch = output_batch
self.output_dimension = output_dimension
if symbolic_input is None:
self.initialise_symbolic_input()
else:
self.symbolic_input = symbolic_input
self.initialise_symbolic_output()
if rng is None:
self.initialise_rng()
else:
self.rng = rng
if theano_rng is None:
self.initialise_theano_rng()
else:
self.theano_rng = theano_rng
self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
"""
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights -
self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
def get_weight_matrix(self):
"""
Get the weight matrix.
"""
return self.weights.get_value(borrow=True)
def train_model(self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
yield (index, cost)
if not yield_every_iteration:
yield (epoch, numpy.mean(costs))
def test_DA(DA, epochs=15):
from sys import argv
import lib.mnist as mnist
print "loading training images"
images = mnist.load_training_images(format="theano", validation=False, div=256.0)
labels = mnist.load_training_labels(format="theano", validation=False)
print "loading test images"
validation_images = mnist.load_training_images(format="numpy", validation=True)
validation_labels = mnist.load_training_labels(format="numpy", validation=True)
print "instantiating denoising autoencoder"
corruption = 0.3
learning_rate = 0.1
hiddens = 500
da = DA(784, hiddens,
input_batch=images,
output_batch=labels,
output_dimension=10,
corruption=corruption,
learning_rate=learning_rate)
print "training..."
for epoch, cost in da.train_model(epochs):
|
print "done."
import PIL
import lib.dlt_utils as utils
import random
image = PIL.Image.fromarray(utils.tile_raster_images(
X=da.weights.get_value(borrow=True).T,
img_shape=(28, 28), tile_shape=(50, 10),
tile_spacing=(1, 1)))
image.save('../plots/{:010x}_{}_{}_{}_{}_{}.png'.format(
random.randrange(16**10), argv[0].replace("/", "-"), corruption, learning_rate, epochs, hiddens))
if __name__ == '__main__':
test_DA(Denoising_Autoencoder, 10)
| print epoch, cost
print "wrong {:.02%} of the time".format(
float(da.validate_model(validation_images, validation_labels))) | conditional_block |
webapp.py | # Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
from __future__ import print_function
import re
import six
import argparse
from six.moves.urllib import parse
from six.moves import BaseHTTPServer
import json
import ssl
import logging
import subprocess
from six.moves.socketserver import ThreadingMixIn
import sg_jira
DESCRIPTION = """
A simple web app frontend to the SG Jira bridge.
"""
CSS_TEMPLATE = """
<style>
body {
margin: 0;
background-color: #eee;
font-family: Arial, Helvetica, sans-serif;
}
h1 {
background-color: whitesmoke;
color: #00BAFF;
border-radius: 5px;
padding: 5 5 5 15px;
border-bottom: 1px solid #ddd;
}
.content { margin: 0 0 15px 15px; }
.error { margin: 0 0 15px 15px; }
.details { margin: 40px 0 15px 15px; }
h2 { margin-bottom: 10px; }
p { margin-top: 10px; }
</style>
"""
HMTL_TEMPLATE = """
<head>
<title>SG Jira Bridge: %s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="content">
<h2>%s</h2>
<p>%s</p>
</div>
</body>
</html>
""".format(
style=CSS_TEMPLATE
)
# We overriding the default html error template to render errors to the user.
# This template *requires* the following format tokens:
# - %(code)d - for the response code
# - %(explain)s - for the short explanation of the response code
# - %(message)s - for a detailed message about the error
HTML_ERROR_TEMPLATE = """
<head>
<title>SG Jira Bridge Error %(code)d: %(message)s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="error">
<h2>Error %(code)d</h2>
<p>%(explain)s</p>
</div>
<div class="details">
<p><strong>Details: </strong> <pre>%(message)s</pre></p>
</div>
</body>
""".format(
style=CSS_TEMPLATE
)
# Please note that we can't use __name__ here as it would be __main__
logger = logging.getLogger("webapp")
def get_sg_jira_bridge_version():
"""
Helper to extract a version number for the sg-jira-bridge module.
This will attenmpt to extract the version number from git if installed from
a cloned repo. If a version is unable to be determined, or the process
fails for any reason, we return "dev"
:returns: A major.minor.patch[.sub] version string or "dev".
"""
# Note: if you install from a cloned git repository
# (e.g. pip install ./tk-core), the version number
# will be picked up from the most recently added tag.
try:
version_git = subprocess.check_output(
["git", "describe", "--abbrev=0"]
).rstrip()
return version_git
except Exception:
# Blindly ignore problems. Git might be not available, or the user may
# have installed via a zip archive, etc...
pass
return "dev"
class SgJiraBridgeBadRequestError(Exception):
"""
Custom exception so we can differentiate between errors we raise that
should return 4xx error codes and errors in the application which should
return 500 error codes.
"""
pass
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
Basic server with threading functionality mixed in. This will help the server
keep up with a high volume of throughput from ShotGrid and Jira.
"""
def __init__(self, settings, *args, **kwargs):
# Note: BaseHTTPServer.HTTPServer is not a new style class so we can't use
# super here.
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._sg_jira = sg_jira.Bridge.get_bridge(settings)
def sync_in_jira(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_jira(*args, **kwargs)
def sync_in_shotgun(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_shotgun(*args, **kwargs)
def admin_reset(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload. | # /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload
def _handle_sync_request(self, path_parts, parameters):
"""
Handle a request to sync between ShotGrid and Jira in either direction.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed. We expect the path to for this request to
be one of the following:
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
If the SG Entity is not specified in the path, it must be present in
the loaded payload.
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["sg2jira", "default", "Task", "123"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
entity_type = None
entity_key = None
if len(path_parts) == 4:
direction, settings_name, entity_type, entity_key = path_parts
elif len(path_parts) == 2:
direction, settings_name = path_parts
else:
raise SgJiraBridgeBadRequestError("Invalid request path %s" % self.path)
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
raise SgJiraBridgeBadRequestError(
"Invalid settings name %s" % settings_name
)
payload = self._read_payload()
if direction == "sg2jira":
# Ensure we get a valid entity_type and entity_id
if not entity_type or not entity_key:
# We need to retrieve this from the payload.
entity_type = payload.get("entity_type")
entity_key = payload.get("entity_id")
if not entity_type or not entity_key:
raise SgJiraBridgeBadRequestError(
"Invalid request payload %s, unable to retrieve a Shotgun Entity type and its id."
% payload
)
# We could have a str or int here depending on how it was sent.
try:
entity_key = int(entity_key)
except ValueError as e:
# log the original exception before we obfuscate it
logger.debug(e, exc_info=True)
raise SgJiraBridgeBadRequestError(
"Invalid Shotgun %s id %s, it must be a number."
% (entity_type, entity_key,)
)
self.server.sync_in_jira(
settings_name, entity_type, int(entity_key), event=payload, **parameters
)
elif direction == "jira2sg":
if not entity_type or not entity_key:
# We can't retrieve this easily from the webhook payload without
# hard coding a list of supported resource types, so we require
# it to be specified in the path for the time being.
raise SgJiraBridgeBadRequestError(
"Invalid request path %s, it must include a Jira resource "
"type and its key" % self.path
)
self.server.sync_in_shotgun(
settings_name, entity_type, entity_key, event=payload, **parameters
)
def _handle_admin_request(self, path_parts, parameters):
"""
Handle admin request to the server.
Currently handles a single action, ``reset`` which resets the Bridge
in order to clear out the ShotGrid schema cache.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed.
admin/reset
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["admin", "reset"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
# The only function we respond to now is reset
if len(path_parts) != 2 or path_parts[1] != "reset":
raise SgJiraBridgeBadRequestError(
"Invalid admin path '%s'. Action is not set or unsupported." % self.path
)
self.server.admin_reset(**parameters)
def log_message(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.info(message)
def log_error(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.error(message)
def create_server(port, settings, keyfile=None, certfile=None):
"""
Create the server.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in HTTPS mode.
:param str certfile: Optional path to a PEM certificate file to run in HTTPS mode.
:returns: The HTTP Server
:type: :class:`BaseHTTPServer.BaseHTTPRequestHandler`
"""
httpd = Server(settings, ("localhost", port), RequestHandler)
if keyfile and certfile:
# Activate HTTPS.
httpd.socket = ssl.wrap_socket(
httpd.socket, keyfile=keyfile, certfile=certfile, server_side=True
)
return httpd
def run_server(port, settings, keyfile=None, certfile=None):
"""
Run the server until a shutdown is requested.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in https mode.
:param str certfile: Optional path to a PEM certificate file to run in https mode.
"""
create_server(port, settings, keyfile, certfile).serve_forever()
def main():
"""
Retrieve command line arguments and start the server.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--port", type=int, default=9090, help="The port number to listen to.",
)
parser.add_argument("--settings", help="Full path to settings file.", required=True)
parser.add_argument(
"--ssl_context",
help="A key and certificate file pair to run the server in HTTPS mode.",
nargs=2,
)
args = parser.parse_args()
keyfile = None
certfile = None
if args.ssl_context:
keyfile, certfile = args.ssl_context
run_server(
port=args.port, settings=args.settings, keyfile=keyfile, certfile=certfile,
)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Shutting down...") | """ | random_line_split |
webapp.py | # Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
from __future__ import print_function
import re
import six
import argparse
from six.moves.urllib import parse
from six.moves import BaseHTTPServer
import json
import ssl
import logging
import subprocess
from six.moves.socketserver import ThreadingMixIn
import sg_jira
DESCRIPTION = """
A simple web app frontend to the SG Jira bridge.
"""
CSS_TEMPLATE = """
<style>
body {
margin: 0;
background-color: #eee;
font-family: Arial, Helvetica, sans-serif;
}
h1 {
background-color: whitesmoke;
color: #00BAFF;
border-radius: 5px;
padding: 5 5 5 15px;
border-bottom: 1px solid #ddd;
}
.content { margin: 0 0 15px 15px; }
.error { margin: 0 0 15px 15px; }
.details { margin: 40px 0 15px 15px; }
h2 { margin-bottom: 10px; }
p { margin-top: 10px; }
</style>
"""
HMTL_TEMPLATE = """
<head>
<title>SG Jira Bridge: %s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="content">
<h2>%s</h2>
<p>%s</p>
</div>
</body>
</html>
""".format(
style=CSS_TEMPLATE
)
# We overriding the default html error template to render errors to the user.
# This template *requires* the following format tokens:
# - %(code)d - for the response code
# - %(explain)s - for the short explanation of the response code
# - %(message)s - for a detailed message about the error
HTML_ERROR_TEMPLATE = """
<head>
<title>SG Jira Bridge Error %(code)d: %(message)s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="error">
<h2>Error %(code)d</h2>
<p>%(explain)s</p>
</div>
<div class="details">
<p><strong>Details: </strong> <pre>%(message)s</pre></p>
</div>
</body>
""".format(
style=CSS_TEMPLATE
)
# Please note that we can't use __name__ here as it would be __main__
logger = logging.getLogger("webapp")
def get_sg_jira_bridge_version():
|
class SgJiraBridgeBadRequestError(Exception):
"""
Custom exception so we can differentiate between errors we raise that
should return 4xx error codes and errors in the application which should
return 500 error codes.
"""
pass
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
Basic server with threading functionality mixed in. This will help the server
keep up with a high volume of throughput from ShotGrid and Jira.
"""
def __init__(self, settings, *args, **kwargs):
# Note: BaseHTTPServer.HTTPServer is not a new style class so we can't use
# super here.
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._sg_jira = sg_jira.Bridge.get_bridge(settings)
def sync_in_jira(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_jira(*args, **kwargs)
def sync_in_shotgun(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_shotgun(*args, **kwargs)
def admin_reset(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload.
"""
# /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload
def _handle_sync_request(self, path_parts, parameters):
"""
Handle a request to sync between ShotGrid and Jira in either direction.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed. We expect the path to for this request to
be one of the following:
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
If the SG Entity is not specified in the path, it must be present in
the loaded payload.
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["sg2jira", "default", "Task", "123"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
entity_type = None
entity_key = None
if len(path_parts) == 4:
direction, settings_name, entity_type, entity_key = path_parts
elif len(path_parts) == 2:
direction, settings_name = path_parts
else:
raise SgJiraBridgeBadRequestError("Invalid request path %s" % self.path)
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
raise SgJiraBridgeBadRequestError(
"Invalid settings name %s" % settings_name
)
payload = self._read_payload()
if direction == "sg2jira":
# Ensure we get a valid entity_type and entity_id
if not entity_type or not entity_key:
# We need to retrieve this from the payload.
entity_type = payload.get("entity_type")
entity_key = payload.get("entity_id")
if not entity_type or not entity_key:
raise SgJiraBridgeBadRequestError(
"Invalid request payload %s, unable to retrieve a Shotgun Entity type and its id."
% payload
)
# We could have a str or int here depending on how it was sent.
try:
entity_key = int(entity_key)
except ValueError as e:
# log the original exception before we obfuscate it
logger.debug(e, exc_info=True)
raise SgJiraBridgeBadRequestError(
"Invalid Shotgun %s id %s, it must be a number."
% (entity_type, entity_key,)
)
self.server.sync_in_jira(
settings_name, entity_type, int(entity_key), event=payload, **parameters
)
elif direction == "jira2sg":
if not entity_type or not entity_key:
# We can't retrieve this easily from the webhook payload without
# hard coding a list of supported resource types, so we require
# it to be specified in the path for the time being.
raise SgJiraBridgeBadRequestError(
"Invalid request path %s, it must include a Jira resource "
"type and its key" % self.path
)
self.server.sync_in_shotgun(
settings_name, entity_type, entity_key, event=payload, **parameters
)
def _handle_admin_request(self, path_parts, parameters):
"""
Handle admin request to the server.
Currently handles a single action, ``reset`` which resets the Bridge
in order to clear out the ShotGrid schema cache.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed.
admin/reset
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["admin", "reset"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
# The only function we respond to now is reset
if len(path_parts) != 2 or path_parts[1] != "reset":
raise SgJiraBridgeBadRequestError(
"Invalid admin path '%s'. Action is not set or unsupported." % self.path
)
self.server.admin_reset(**parameters)
def log_message(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.info(message)
def log_error(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.error(message)
def create_server(port, settings, keyfile=None, certfile=None):
"""
Create the server.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in HTTPS mode.
:param str certfile: Optional path to a PEM certificate file to run in HTTPS mode.
:returns: The HTTP Server
:type: :class:`BaseHTTPServer.BaseHTTPRequestHandler`
"""
httpd = Server(settings, ("localhost", port), RequestHandler)
if keyfile and certfile:
# Activate HTTPS.
httpd.socket = ssl.wrap_socket(
httpd.socket, keyfile=keyfile, certfile=certfile, server_side=True
)
return httpd
def run_server(port, settings, keyfile=None, certfile=None):
"""
Run the server until a shutdown is requested.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in https mode.
:param str certfile: Optional path to a PEM certificate file to run in https mode.
"""
create_server(port, settings, keyfile, certfile).serve_forever()
def main():
"""
Retrieve command line arguments and start the server.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--port", type=int, default=9090, help="The port number to listen to.",
)
parser.add_argument("--settings", help="Full path to settings file.", required=True)
parser.add_argument(
"--ssl_context",
help="A key and certificate file pair to run the server in HTTPS mode.",
nargs=2,
)
args = parser.parse_args()
keyfile = None
certfile = None
if args.ssl_context:
keyfile, certfile = args.ssl_context
run_server(
port=args.port, settings=args.settings, keyfile=keyfile, certfile=certfile,
)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Shutting down...")
| """
Helper to extract a version number for the sg-jira-bridge module.
This will attenmpt to extract the version number from git if installed from
a cloned repo. If a version is unable to be determined, or the process
fails for any reason, we return "dev"
:returns: A major.minor.patch[.sub] version string or "dev".
"""
# Note: if you install from a cloned git repository
# (e.g. pip install ./tk-core), the version number
# will be picked up from the most recently added tag.
try:
version_git = subprocess.check_output(
["git", "describe", "--abbrev=0"]
).rstrip()
return version_git
except Exception:
# Blindly ignore problems. Git might be not available, or the user may
# have installed via a zip archive, etc...
pass
return "dev" | identifier_body |
webapp.py | # Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
from __future__ import print_function
import re
import six
import argparse
from six.moves.urllib import parse
from six.moves import BaseHTTPServer
import json
import ssl
import logging
import subprocess
from six.moves.socketserver import ThreadingMixIn
import sg_jira
DESCRIPTION = """
A simple web app frontend to the SG Jira bridge.
"""
CSS_TEMPLATE = """
<style>
body {
margin: 0;
background-color: #eee;
font-family: Arial, Helvetica, sans-serif;
}
h1 {
background-color: whitesmoke;
color: #00BAFF;
border-radius: 5px;
padding: 5 5 5 15px;
border-bottom: 1px solid #ddd;
}
.content { margin: 0 0 15px 15px; }
.error { margin: 0 0 15px 15px; }
.details { margin: 40px 0 15px 15px; }
h2 { margin-bottom: 10px; }
p { margin-top: 10px; }
</style>
"""
HMTL_TEMPLATE = """
<head>
<title>SG Jira Bridge: %s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="content">
<h2>%s</h2>
<p>%s</p>
</div>
</body>
</html>
""".format(
style=CSS_TEMPLATE
)
# We overriding the default html error template to render errors to the user.
# This template *requires* the following format tokens:
# - %(code)d - for the response code
# - %(explain)s - for the short explanation of the response code
# - %(message)s - for a detailed message about the error
HTML_ERROR_TEMPLATE = """
<head>
<title>SG Jira Bridge Error %(code)d: %(message)s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="error">
<h2>Error %(code)d</h2>
<p>%(explain)s</p>
</div>
<div class="details">
<p><strong>Details: </strong> <pre>%(message)s</pre></p>
</div>
</body>
""".format(
style=CSS_TEMPLATE
)
# Please note that we can't use __name__ here as it would be __main__
logger = logging.getLogger("webapp")
def get_sg_jira_bridge_version():
"""
Helper to extract a version number for the sg-jira-bridge module.
This will attenmpt to extract the version number from git if installed from
a cloned repo. If a version is unable to be determined, or the process
fails for any reason, we return "dev"
:returns: A major.minor.patch[.sub] version string or "dev".
"""
# Note: if you install from a cloned git repository
# (e.g. pip install ./tk-core), the version number
# will be picked up from the most recently added tag.
try:
version_git = subprocess.check_output(
["git", "describe", "--abbrev=0"]
).rstrip()
return version_git
except Exception:
# Blindly ignore problems. Git might be not available, or the user may
# have installed via a zip archive, etc...
pass
return "dev"
class SgJiraBridgeBadRequestError(Exception):
"""
Custom exception so we can differentiate between errors we raise that
should return 4xx error codes and errors in the application which should
return 500 error codes.
"""
pass
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
Basic server with threading functionality mixed in. This will help the server
keep up with a high volume of throughput from ShotGrid and Jira.
"""
def __init__(self, settings, *args, **kwargs):
# Note: BaseHTTPServer.HTTPServer is not a new style class so we can't use
# super here.
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._sg_jira = sg_jira.Bridge.get_bridge(settings)
def sync_in_jira(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_jira(*args, **kwargs)
def sync_in_shotgun(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_shotgun(*args, **kwargs)
def admin_reset(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload.
"""
# /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload
def _handle_sync_request(self, path_parts, parameters):
"""
Handle a request to sync between ShotGrid and Jira in either direction.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed. We expect the path to for this request to
be one of the following:
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
If the SG Entity is not specified in the path, it must be present in
the loaded payload.
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["sg2jira", "default", "Task", "123"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
entity_type = None
entity_key = None
if len(path_parts) == 4:
direction, settings_name, entity_type, entity_key = path_parts
elif len(path_parts) == 2:
direction, settings_name = path_parts
else:
raise SgJiraBridgeBadRequestError("Invalid request path %s" % self.path)
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
raise SgJiraBridgeBadRequestError(
"Invalid settings name %s" % settings_name
)
payload = self._read_payload()
if direction == "sg2jira":
# Ensure we get a valid entity_type and entity_id
if not entity_type or not entity_key:
# We need to retrieve this from the payload.
entity_type = payload.get("entity_type")
entity_key = payload.get("entity_id")
if not entity_type or not entity_key:
raise SgJiraBridgeBadRequestError(
"Invalid request payload %s, unable to retrieve a Shotgun Entity type and its id."
% payload
)
# We could have a str or int here depending on how it was sent.
try:
entity_key = int(entity_key)
except ValueError as e:
# log the original exception before we obfuscate it
logger.debug(e, exc_info=True)
raise SgJiraBridgeBadRequestError(
"Invalid Shotgun %s id %s, it must be a number."
% (entity_type, entity_key,)
)
self.server.sync_in_jira(
settings_name, entity_type, int(entity_key), event=payload, **parameters
)
elif direction == "jira2sg":
if not entity_type or not entity_key:
# We can't retrieve this easily from the webhook payload without
# hard coding a list of supported resource types, so we require
# it to be specified in the path for the time being.
raise SgJiraBridgeBadRequestError(
"Invalid request path %s, it must include a Jira resource "
"type and its key" % self.path
)
self.server.sync_in_shotgun(
settings_name, entity_type, entity_key, event=payload, **parameters
)
def _handle_admin_request(self, path_parts, parameters):
"""
Handle admin request to the server.
Currently handles a single action, ``reset`` which resets the Bridge
in order to clear out the ShotGrid schema cache.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed.
admin/reset
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["admin", "reset"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
# The only function we respond to now is reset
if len(path_parts) != 2 or path_parts[1] != "reset":
raise SgJiraBridgeBadRequestError(
"Invalid admin path '%s'. Action is not set or unsupported." % self.path
)
self.server.admin_reset(**parameters)
def log_message(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.info(message)
def | (self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.error(message)
def create_server(port, settings, keyfile=None, certfile=None):
"""
Create the server.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in HTTPS mode.
:param str certfile: Optional path to a PEM certificate file to run in HTTPS mode.
:returns: The HTTP Server
:type: :class:`BaseHTTPServer.BaseHTTPRequestHandler`
"""
httpd = Server(settings, ("localhost", port), RequestHandler)
if keyfile and certfile:
# Activate HTTPS.
httpd.socket = ssl.wrap_socket(
httpd.socket, keyfile=keyfile, certfile=certfile, server_side=True
)
return httpd
def run_server(port, settings, keyfile=None, certfile=None):
"""
Run the server until a shutdown is requested.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in https mode.
:param str certfile: Optional path to a PEM certificate file to run in https mode.
"""
create_server(port, settings, keyfile, certfile).serve_forever()
def main():
"""
Retrieve command line arguments and start the server.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--port", type=int, default=9090, help="The port number to listen to.",
)
parser.add_argument("--settings", help="Full path to settings file.", required=True)
parser.add_argument(
"--ssl_context",
help="A key and certificate file pair to run the server in HTTPS mode.",
nargs=2,
)
args = parser.parse_args()
keyfile = None
certfile = None
if args.ssl_context:
keyfile, certfile = args.ssl_context
run_server(
port=args.port, settings=args.settings, keyfile=keyfile, certfile=certfile,
)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Shutting down...")
| log_error | identifier_name |
webapp.py | # Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
from __future__ import print_function
import re
import six
import argparse
from six.moves.urllib import parse
from six.moves import BaseHTTPServer
import json
import ssl
import logging
import subprocess
from six.moves.socketserver import ThreadingMixIn
import sg_jira
DESCRIPTION = """
A simple web app frontend to the SG Jira bridge.
"""
CSS_TEMPLATE = """
<style>
body {
margin: 0;
background-color: #eee;
font-family: Arial, Helvetica, sans-serif;
}
h1 {
background-color: whitesmoke;
color: #00BAFF;
border-radius: 5px;
padding: 5 5 5 15px;
border-bottom: 1px solid #ddd;
}
.content { margin: 0 0 15px 15px; }
.error { margin: 0 0 15px 15px; }
.details { margin: 40px 0 15px 15px; }
h2 { margin-bottom: 10px; }
p { margin-top: 10px; }
</style>
"""
HMTL_TEMPLATE = """
<head>
<title>SG Jira Bridge: %s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="content">
<h2>%s</h2>
<p>%s</p>
</div>
</body>
</html>
""".format(
style=CSS_TEMPLATE
)
# We overriding the default html error template to render errors to the user.
# This template *requires* the following format tokens:
# - %(code)d - for the response code
# - %(explain)s - for the short explanation of the response code
# - %(message)s - for a detailed message about the error
HTML_ERROR_TEMPLATE = """
<head>
<title>SG Jira Bridge Error %(code)d: %(message)s</title>
{style}
</head>
<body>
<h1>SG Jira Bridge</h1>
<div class="error">
<h2>Error %(code)d</h2>
<p>%(explain)s</p>
</div>
<div class="details">
<p><strong>Details: </strong> <pre>%(message)s</pre></p>
</div>
</body>
""".format(
style=CSS_TEMPLATE
)
# Please note that we can't use __name__ here as it would be __main__
logger = logging.getLogger("webapp")
def get_sg_jira_bridge_version():
"""
Helper to extract a version number for the sg-jira-bridge module.
This will attenmpt to extract the version number from git if installed from
a cloned repo. If a version is unable to be determined, or the process
fails for any reason, we return "dev"
:returns: A major.minor.patch[.sub] version string or "dev".
"""
# Note: if you install from a cloned git repository
# (e.g. pip install ./tk-core), the version number
# will be picked up from the most recently added tag.
try:
version_git = subprocess.check_output(
["git", "describe", "--abbrev=0"]
).rstrip()
return version_git
except Exception:
# Blindly ignore problems. Git might be not available, or the user may
# have installed via a zip archive, etc...
pass
return "dev"
class SgJiraBridgeBadRequestError(Exception):
"""
Custom exception so we can differentiate between errors we raise that
should return 4xx error codes and errors in the application which should
return 500 error codes.
"""
pass
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
Basic server with threading functionality mixed in. This will help the server
keep up with a high volume of throughput from ShotGrid and Jira.
"""
def __init__(self, settings, *args, **kwargs):
# Note: BaseHTTPServer.HTTPServer is not a new style class so we can't use
# super here.
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._sg_jira = sg_jira.Bridge.get_bridge(settings)
def sync_in_jira(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_jira(*args, **kwargs)
def sync_in_shotgun(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_shotgun(*args, **kwargs)
def admin_reset(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload.
"""
# /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload
def _handle_sync_request(self, path_parts, parameters):
"""
Handle a request to sync between ShotGrid and Jira in either direction.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed. We expect the path to for this request to
be one of the following:
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
If the SG Entity is not specified in the path, it must be present in
the loaded payload.
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["sg2jira", "default", "Task", "123"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
entity_type = None
entity_key = None
if len(path_parts) == 4:
direction, settings_name, entity_type, entity_key = path_parts
elif len(path_parts) == 2:
direction, settings_name = path_parts
else:
raise SgJiraBridgeBadRequestError("Invalid request path %s" % self.path)
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
raise SgJiraBridgeBadRequestError(
"Invalid settings name %s" % settings_name
)
payload = self._read_payload()
if direction == "sg2jira":
# Ensure we get a valid entity_type and entity_id
if not entity_type or not entity_key:
# We need to retrieve this from the payload.
|
if not entity_type or not entity_key:
raise SgJiraBridgeBadRequestError(
"Invalid request payload %s, unable to retrieve a Shotgun Entity type and its id."
% payload
)
# We could have a str or int here depending on how it was sent.
try:
entity_key = int(entity_key)
except ValueError as e:
# log the original exception before we obfuscate it
logger.debug(e, exc_info=True)
raise SgJiraBridgeBadRequestError(
"Invalid Shotgun %s id %s, it must be a number."
% (entity_type, entity_key,)
)
self.server.sync_in_jira(
settings_name, entity_type, int(entity_key), event=payload, **parameters
)
elif direction == "jira2sg":
if not entity_type or not entity_key:
# We can't retrieve this easily from the webhook payload without
# hard coding a list of supported resource types, so we require
# it to be specified in the path for the time being.
raise SgJiraBridgeBadRequestError(
"Invalid request path %s, it must include a Jira resource "
"type and its key" % self.path
)
self.server.sync_in_shotgun(
settings_name, entity_type, entity_key, event=payload, **parameters
)
def _handle_admin_request(self, path_parts, parameters):
"""
Handle admin request to the server.
Currently handles a single action, ``reset`` which resets the Bridge
in order to clear out the ShotGrid schema cache.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed.
admin/reset
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["admin", "reset"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
# The only function we respond to now is reset
if len(path_parts) != 2 or path_parts[1] != "reset":
raise SgJiraBridgeBadRequestError(
"Invalid admin path '%s'. Action is not set or unsupported." % self.path
)
self.server.admin_reset(**parameters)
def log_message(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.info(message)
def log_error(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.error(message)
def create_server(port, settings, keyfile=None, certfile=None):
"""
Create the server.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in HTTPS mode.
:param str certfile: Optional path to a PEM certificate file to run in HTTPS mode.
:returns: The HTTP Server
:type: :class:`BaseHTTPServer.BaseHTTPRequestHandler`
"""
httpd = Server(settings, ("localhost", port), RequestHandler)
if keyfile and certfile:
# Activate HTTPS.
httpd.socket = ssl.wrap_socket(
httpd.socket, keyfile=keyfile, certfile=certfile, server_side=True
)
return httpd
def run_server(port, settings, keyfile=None, certfile=None):
"""
Run the server until a shutdown is requested.
:param int port: A port number to listen to.
:param str settings: Path to settings file.
:param str keyfile: Optional path to a PEM key file to run in https mode.
:param str certfile: Optional path to a PEM certificate file to run in https mode.
"""
create_server(port, settings, keyfile, certfile).serve_forever()
def main():
"""
Retrieve command line arguments and start the server.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--port", type=int, default=9090, help="The port number to listen to.",
)
parser.add_argument("--settings", help="Full path to settings file.", required=True)
parser.add_argument(
"--ssl_context",
help="A key and certificate file pair to run the server in HTTPS mode.",
nargs=2,
)
args = parser.parse_args()
keyfile = None
certfile = None
if args.ssl_context:
keyfile, certfile = args.ssl_context
run_server(
port=args.port, settings=args.settings, keyfile=keyfile, certfile=certfile,
)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Shutting down...")
| entity_type = payload.get("entity_type")
entity_key = payload.get("entity_id") | conditional_block |
test.ts | import test from 'ava'
import ApiBuilder from 'claudia-api-builder'
import * as fs from 'fs'
import authenticator, {
APIGatewayProxyEventJwt,
authenticator as authenticator2,
JwtHeader,
Secret,
SigningKeyCallback
} from '../../package/'
test('authenticator is exported', (t) => {
t.is(typeof authenticator, 'function')
t.is(authenticator, authenticator2)
})
/**
* ## Using a Public Key
*
* Using a public key with an algorithm such as RS256 is recommended
* for security and convenience. The public key can be kept inline with
* your code without any security concerns.
*/
// tslint:disable-next-line:max-line-length
const TOKEN_RS512 = 'eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.iNa6ZAKSn3J8GvG6hgfydtbRsKekSp1RunRceNhrRtUSZVvlSz4X1g1OXSJwCG2OyYXhaN0pyxuR7JlI1n663DnkEvop3T7Whxoy5uMxji9vSZ5MrvtLXY75On0ALqZuPuyuH4x6o1xI0huKHJGmRM2OVqD9W80AkpYtszwRwXkjiXdfJHMry9czXm5JrYNp9VowA9jATpkH2IatfSIVAK0c6hJg6Gz05PdtMjFwHpJJFn0qzfexf97pZgqITOX4f-pHZQ6i_jnBciocjMrn62tz8XpGrgjSGNqeUxROPjKCTnmwi0kpMAuBp2rUgj7Ns5wHaKE1riz_qrQqCgxrww'
const PUBLIC_KEY = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAry0bg77WbExsds8R4eJo
fNqbeWnu1QqRqG0wOk35JenMXDU6mCfUFas0ANgS/2PhxOoem5dtxKpJEzXF8eQh
xrO3J9zD9HMbLVMfodpG9Up9u+AUICGvMCAbAuCHcp7vTZtc+OmmSyk5qF1ApGnU
rWromBB8TDFVx0UdOR6I+1F3DvIk7mgjLAhwzycgsLRZFwXxS2mwHVAafD6QYbxZ
I655+ltaf3Gb3CBJSz888i3DfaKT30cCC/7r3rnOqbKjUcG8qxrsp+yOo8l6BeeJ
g57ITeuaRrSza7zdvS0Vydp9RS7VS9JdHQv9b48b7rsx+WLghI/AQ3kK0Xg85C9R
TQIDAQAB
-----END PUBLIC KEY-----`
test.cb('Authenticate with public key', (t) => {
// Begin by creating your Api Builder as normal
const api = new ApiBuilder()
// Next pass in the authenticator along with your key and any config
api.intercept(authenticator(PUBLIC_KEY))
// Register your routes as normal
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
// Export the proxyRouter normally in your code
// export handler = api.proxyRouter
// Here we call it instead to test it
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
/**
* ## Using a Secret Key
*
* When using a secret key you cannot safely include it in your code.
* You must fetch the secret key securely at run time, either from a
* file on you server or using a service like AWS Secrets Manager.
*
* Create a function that fetches the secret key like below,
* and returns it via the callback or as a promise.
* Be sure to decode from base64 if needed.
*/
// tslint:disable-next-line:max-line-length
const TOKEN_HS512 = 'eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.Hk1Qgr18H-VwmDnMcljqEFy8_F1zeIVS-FY-3Xl2pKsMEeFii5-WEVDyBNRPredB9JjoNAkR23iOkTDN4Mu-Xg'
const getSecretKeyCb = (header: JwtHeader, callback: SigningKeyCallback) => {
const filename = `test/test.secret.${header.alg.toLowerCase()}.b64.txt`
fs.readFile(filename, (err, b64) => {
if (err) {
return callback(err)
}
// toString is important, if the first arg is a buffer the second is ignored
const secret = Buffer.from(b64.toString(), 'base64')
return callback(null, secret)
})
}
const getSecretKeyP = (header: JwtHeader) =>
new Promise((res, rej) => {
getSecretKeyCb(header, (err, secret) => {
if (err) {
rej(err)
} else {
res(secret)
}
})
})
test.cb('Authenticate with secret key, via promise', (t) => {
// Begin by creating your Api Builder as normal
const api = new ApiBuilder()
// Next pass in the authenticator along with your key and any config
api.intercept(authenticator(getSecretKeyP))
// Register your routes as normal
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
// Export the proxyRouter normally in your code
// export handler = api.proxyRouter
// Here we call it instead to test it
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_HS512 }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
test.cb('Authenticate with secret key, via callback', (t) => {
// Begin by creating your Api Builder as normal
const api = new ApiBuilder()
// Next pass in the authenticator along with your key and any config
api.intercept(authenticator(getSecretKeyCb))
// Register your routes as normal
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
// Export the proxyRouter normally in your code
// export handler = api.proxyRouter
// Here we call it instead to test it
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_HS512 }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
/**
* ## JWT Headers & Signature
*
* You can access the headers, payload and signature of the JWT
*/
test.cb('Headers & Signature access', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/token', (event: APIGatewayProxyEventJwt) => ({
algorithm: event.jwt.header.alg,
signature: event.jwt.signature.substr(0, 12),
subscriber: event.jwt.payload.sub
}))
testApi(t, api, {
context: { method: 'GET', path: '/token' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, {
body: JSON.stringify({
algorithm: 'RS512',
signature: 'iNa6ZAKSn3J8',
subscriber: '1234567890'
}),
statusCode: 200
})
})
/**
* ## Extra Config
*
* You can specify more arguments to increase security and help catch bugs
*
* For a full list see https://www.npmjs.com/package/jsonwebtoken#jwtverifytoken-secretorpublickey-options-callback
*/
test.cb('Specify algorithm - success', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { algorithms: ['RS512'] }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
test.cb('Specify algorithm - failure', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { algorithms: ['RS256'] }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, {
body: '"Unauthorised: JsonWebTokenError invalid algorithm"',
statusCode: 401
})
})
test.cb('Specify audience - success', (t) => {
// tslint:disable-next-line:max-line-length
const TOKEN_RS512_AUD = 'eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJhdWQiOiJGNDJFRDA4Mi03OTlGLTQ3NkItQjc3RS0xMjAxM0Y1Mzc5QTUifQ.VAnH9ozEAcL3foiSgqJspqS05AdYchn57uKrbCUEwX9uXbsg8nct9bL7y8Omw6qg5ZdTcNsnor8tysGW460yOmg06Pbx0SRHJifJGLpy1bOCWRPG_5NB5aM6uKf78T2QCJXm9f73nKfZ9QJUlfzW41bT2khnsO8gTVYo9yd3yesrKegMlSomxd4VrZFYz4jbNh2f9FUe8MNkubfOxVbM5U7sh5aZMs_uoef08Gxp3Aqx7fPpzj16uW2JTNlhoIYUF4J33T0SufgiR1Xw3R3Jn2BnwdlfgqjLrv0lxzDzHoPyPP8i6TSl3notTcTmLc_GItdcnLNPn8wtjxKNW81tMQ'
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { audience: 'F42ED082-799F-476B-B77E-12013F5379A5' }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512_AUD }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
test.cb('Specify audience - failure', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { audience: 'F42ED082-799F-476B-B77E-12013F5379A5' }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`) | body: '"Unauthorised: JsonWebTokenError jwt audience invalid. expected: F42ED082-799F-476B-B77E-12013F5379A5"',
statusCode: 401
})
})
/**
* ## When is a JWT not a JWT?
*/
test.cb('XWT', (t) => {
// tslint:disable-next-line:max-line-length
const TOKEN_RS512_XWT = 'eyJhbGciOiJSUzUxMiIsInR5cCI6IlhXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.liqYAp9dbby9IzokHKbLkQb5mUAHoO0Ef7hn4Wz-Oh6kVcdqGQjtUFZPbONe9NPLLCuX_82_TkWXdKja5ISHLw3m028d2NGEQ2cbBxOCjHSqeuztUavwzQPeJUNUREh07IQK_MTw-BCskKLoJToIx2NZ3AfttCu4QWXBaJTZkIds0sQIQR11Z3w48QQS7Bjbtmrhzufpw_yfk8Fh0a0PjAlYmTgkE7JAUBT0NwqVPuqrTNUKxHe5DrqeuSAr0VeHSM05HvJFqrncF0KuBfTj0HUzwi6JpZxzlwxx_gzfaHEw2lFtRLJIonVLMAKM1aFj5m67FyfxqUQx0JnqRkEWrQ'
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512_XWT }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
/**
* ## Failures and Errors
*
* Here follow a number of error cases
*/
test.cb('Expired', (t) => {
// tslint:disable-next-line:max-line-length
const TOKEN_RS512_EXP = 'eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiZXhwIjoxNTE2MjM5MDIyfQ.ed_PPzBaYmalmwTp1OxF9--QlPATgTWYVuc2Qg-tofDe4Mhn98B0aEZ-3wN9h2loQG05xhhUy_ZOyLYPYhZrKavU7UiVEIRmDUj2VYzmX575_GdGmxsaoluNP3xYqGjxs4U1-uQN1YIEQRvGx2pn-QeK9crawvzLVdZgyBr69-xVUbsDNIR5msx2Qg2uZLrPWe4ZGoYlpecUDfSoktHAkxsTfcjtE2niS_-Y8yoRqGemu8MWNwMca7edg2xJn-J0z5DDMYgzdVyI9oHkf-vu_lb535ekuYAigXBKLRBbPO9zzXv3LmJFlDJKJzkKGU8CSkUTR11ftsEc7BbUvsQ6Zg'
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512_EXP }
}, {
body: '"Unauthorised: TokenExpiredError jwt expired"',
statusCode: 401
})
})
test.cb('Invalid/Forged token signature', (t) => {
// tslint:disable-next-line:max-line-length
const TOKEN_RS512_INV = 'eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiZXhwIjoxNTE2MjM5MDIyfQ.ed_PPzBaYmalmwTp1OxF9--QlPATgTWYVuc2Qg-tofDe4Mhn98B0aEZ-3wN9h2loQG05xhhUy_ZOyLYPYhZrKavU7UiVEIRmDUj2VYzmX575_GdGmxsaoluNP3xYqGjxs4U1-uQN1YIEQRvGx2pn-QeK9crawvzLVdZgyBr69-xVUbsDNIR5msx2Qg2uZLrPWe4ZGoYlpecUDfSoktHAkxsTfcjtE2niS_-Y8yoRqGemu8MWNwMca7edg2xJn-J0z5DDMYgzdVyI9oHkf-vu_lb535ekuYAigXBKLRBbPO9zzXv3LmJFlDJKJzkKGU7CSkUTR11ftsEc7BbUvsQ6Zg'
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512_INV }
}, {
body: '"Unauthorised: JsonWebTokenError invalid signature"',
statusCode: 401
})
})
test.cb('Forgot to provide secret/key', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator) // sic
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
},
undefined,
'event does not contain routing information'
)
})
const getSecretKeyPFail = () =>
new Promise((res, rej) => {
getSecretKeyCb({ alg: 'fail' }, (err, secret) => {
if (err) {
rej(err)
} else {
res(secret)
}
})
})
test.cb('Failure fetching key', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(getSecretKeyPFail))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_HS512 }
}, {
// tslint:disable-next-line:max-line-length
body: '"Unauthorised: JsonWebTokenError error in secret or public key callback: ENOENT: no such file or directory, open \'test/test.secret.fail.b64.txt\'"',
statusCode: 401
})
})
test.cb('No headers in event', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' }
}, {
body: '"Unauthorised: no headers"',
statusCode: 401
})
})
test.cb('No authorization in headers', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: {}
}, {
body: '"Unauthorised: no authorization header"',
statusCode: 401
})
})
test.cb('No bearer in authorization header', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'Basic QWxhZGRpbjpPcGVuU2VzYW1l' }
}, {
body: '"Unauthorised: authorization scheme must be bearer"',
statusCode: 401
})
})
test.cb('No token in authorization header', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' }
}, {
body: '"Unauthorised: no authorization token"',
statusCode: 401
})
})
const testApi = (t: any, api: any, event: any, expectedResponse: any, expectedError: any = null) => {
t.plan(1)
const done = async (error: any, response: any) => {
// await error
// await response
if (response) delete response.headers
t.deepEqual(
{ error, response },
{ error: expectedError, response: expectedResponse }
)
t.end()
}
api.proxyRouter(event, { done }, done)
} | testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, { | random_line_split |
main.rs | extern crate hyper;
extern crate rustc_serialize;
extern crate url;
mod errors;
mod structs;
use errors::*;
use errors::ResourceError::*;
use structs::*;
use hyper::client::Client;
use std::fs::File;
use hyper::header::*;
use std::io::prelude::*;
use std::path::Path;
use std::thread;
use std::sync::Arc;
use rustc_serialize::json;
use hyper::mime::{Mime};
use std::sync::mpsc;
use hyper::status::StatusCode;
use url::Url;
//use std::fs;
//const TEST_ID : &'static str = "5b11f4ce-a62d-471e-81fc-a69a8278c7da";
const USER_ARGENT: &'static str = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36";
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
macro_rules! musicbrainz_url {
($id : expr) => ( format!("http://musicbrainz.org/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with statuscode {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) |
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
.header(UserAgent(USER_ARGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
fn image_from_cover_art_response(payload : &str) -> String {
let body : CoverArtResponse = json::decode(&payload).unwrap();
body.images.into_iter().find(|item| item.front).unwrap().image
}
#[test]
fn test_image_from_cover_art_response() {
let payload = "{\"images\":[{\"front\":true,\"image\":\"http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg\"}]}";
let response = image_from_cover_art_response(payload);
assert_eq!("http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg", response);
}
fn process_mb_response(payload: &str) -> ArtistReference {
let a: ArtistReference = json::decode(payload).unwrap();
a
}
enum Provider {
Musicbrainz,
CoverArt
}
impl Provider {
fn fs(&self) -> SimpleFs {
match *self {
Provider::Musicbrainz => SimpleFs { directory : "tmp".to_string() },
Provider::CoverArt => SimpleFs { directory : "tmp".to_string() }
}
}
fn extract_id(&self, url: &str) -> String {
let parsed : Url = Url::parse(url).unwrap();
parsed.path_segments().unwrap().last().unwrap().to_string()
}
fn format_file_name (&self, id : &str) -> String {
match *self {
Provider::Musicbrainz => musicbrainz_file!(id),
Provider::CoverArt => cover_art_file!(id)
}
}
}
#[test]
fn test_extract_id_from_url() {
let mb : String = musicbrainz_url!("1289836171-250");
assert_eq!("1289836171-250", Provider::extract_id(&mb));
}
#[test]
fn test_format_file_name() {
assert_eq!("ca_123", Provider::COVER_ART.format_file_name("123"));
assert_eq!("mb_123", Provider::MUSICBRAINZ.format_file_name("123"));
}
#[cfg(not(feature="meshup_mode_web"))]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
FileMeshup.query(id)
}
#[cfg(feature="meshup_mode_web")]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
WebMeshup.query(id)
}
fn main() {
let args : Vec<String> = std::env::args().into_iter().collect();
let id = &args[1];
let web_response = query(id).unwrap();
print!("{}", json::encode(&web_response).unwrap())
}
| {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
} | identifier_body |
main.rs | extern crate hyper;
extern crate rustc_serialize;
extern crate url;
mod errors;
mod structs;
use errors::*;
use errors::ResourceError::*;
use structs::*;
use hyper::client::Client;
use std::fs::File;
use hyper::header::*;
use std::io::prelude::*;
use std::path::Path;
use std::thread;
use std::sync::Arc;
use rustc_serialize::json;
use hyper::mime::{Mime};
use std::sync::mpsc;
use hyper::status::StatusCode;
use url::Url;
//use std::fs;
//const TEST_ID : &'static str = "5b11f4ce-a62d-471e-81fc-a69a8278c7da";
const USER_ARGENT: &'static str = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36";
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
macro_rules! musicbrainz_url {
($id : expr) => ( format!("http://musicbrainz.org/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with statuscode {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
}
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
.header(UserAgent(USER_ARGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
fn image_from_cover_art_response(payload : &str) -> String {
let body : CoverArtResponse = json::decode(&payload).unwrap();
body.images.into_iter().find(|item| item.front).unwrap().image
}
#[test]
fn test_image_from_cover_art_response() {
let payload = "{\"images\":[{\"front\":true,\"image\":\"http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg\"}]}";
let response = image_from_cover_art_response(payload);
assert_eq!("http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg", response);
}
fn process_mb_response(payload: &str) -> ArtistReference {
let a: ArtistReference = json::decode(payload).unwrap();
a
}
enum Provider {
Musicbrainz,
CoverArt
}
impl Provider {
fn | (&self) -> SimpleFs {
match *self {
Provider::Musicbrainz => SimpleFs { directory : "tmp".to_string() },
Provider::CoverArt => SimpleFs { directory : "tmp".to_string() }
}
}
fn extract_id(&self, url: &str) -> String {
let parsed : Url = Url::parse(url).unwrap();
parsed.path_segments().unwrap().last().unwrap().to_string()
}
fn format_file_name (&self, id : &str) -> String {
match *self {
Provider::Musicbrainz => musicbrainz_file!(id),
Provider::CoverArt => cover_art_file!(id)
}
}
}
#[test]
fn test_extract_id_from_url() {
let mb : String = musicbrainz_url!("1289836171-250");
assert_eq!("1289836171-250", Provider::extract_id(&mb));
}
#[test]
fn test_format_file_name() {
assert_eq!("ca_123", Provider::COVER_ART.format_file_name("123"));
assert_eq!("mb_123", Provider::MUSICBRAINZ.format_file_name("123"));
}
#[cfg(not(feature="meshup_mode_web"))]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
FileMeshup.query(id)
}
#[cfg(feature="meshup_mode_web")]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
WebMeshup.query(id)
}
fn main() {
let args : Vec<String> = std::env::args().into_iter().collect();
let id = &args[1];
let web_response = query(id).unwrap();
print!("{}", json::encode(&web_response).unwrap())
}
| fs | identifier_name |
main.rs | extern crate hyper;
extern crate rustc_serialize;
extern crate url;
mod errors;
mod structs;
use errors::*;
use errors::ResourceError::*;
use structs::*;
use hyper::client::Client;
use std::fs::File;
use hyper::header::*;
use std::io::prelude::*;
use std::path::Path;
use std::thread;
use std::sync::Arc;
use rustc_serialize::json;
use hyper::mime::{Mime};
use std::sync::mpsc;
use hyper::status::StatusCode;
use url::Url;
//use std::fs;
//const TEST_ID : &'static str = "5b11f4ce-a62d-471e-81fc-a69a8278c7da";
const USER_ARGENT: &'static str = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36";
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
macro_rules! musicbrainz_url { | macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with statuscode {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
}
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
.header(UserAgent(USER_ARGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
fn image_from_cover_art_response(payload : &str) -> String {
let body : CoverArtResponse = json::decode(&payload).unwrap();
body.images.into_iter().find(|item| item.front).unwrap().image
}
#[test]
fn test_image_from_cover_art_response() {
let payload = "{\"images\":[{\"front\":true,\"image\":\"http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg\"}]}";
let response = image_from_cover_art_response(payload);
assert_eq!("http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg", response);
}
fn process_mb_response(payload: &str) -> ArtistReference {
let a: ArtistReference = json::decode(payload).unwrap();
a
}
enum Provider {
Musicbrainz,
CoverArt
}
impl Provider {
fn fs(&self) -> SimpleFs {
match *self {
Provider::Musicbrainz => SimpleFs { directory : "tmp".to_string() },
Provider::CoverArt => SimpleFs { directory : "tmp".to_string() }
}
}
fn extract_id(&self, url: &str) -> String {
let parsed : Url = Url::parse(url).unwrap();
parsed.path_segments().unwrap().last().unwrap().to_string()
}
fn format_file_name (&self, id : &str) -> String {
match *self {
Provider::Musicbrainz => musicbrainz_file!(id),
Provider::CoverArt => cover_art_file!(id)
}
}
}
#[test]
fn test_extract_id_from_url() {
let mb : String = musicbrainz_url!("1289836171-250");
assert_eq!("1289836171-250", Provider::extract_id(&mb));
}
#[test]
fn test_format_file_name() {
assert_eq!("ca_123", Provider::COVER_ART.format_file_name("123"));
assert_eq!("mb_123", Provider::MUSICBRAINZ.format_file_name("123"));
}
#[cfg(not(feature="meshup_mode_web"))]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
FileMeshup.query(id)
}
#[cfg(feature="meshup_mode_web")]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
WebMeshup.query(id)
}
fn main() {
let args : Vec<String> = std::env::args().into_iter().collect();
let id = &args[1];
let web_response = query(id).unwrap();
print!("{}", json::encode(&web_response).unwrap())
} | ($id : expr) => ( format!("http://musicbrainz.org/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
| random_line_split |
memory.rs | use std::mem::transmute;
use world::World;
use geometry::Point;
use sprite::Sprite;
use util;
// In Hex's Cellar, the player's spells manipulate an u8[40] of bytes that
// affect the world around her. This file gives a "RAM map" for that array.
// Color (3 bits = 8 bright colors) and character (5 bits, offset added to '!')
pub const PLAYER_APPEARANCE: u8 = 0x00;
// Begins a char[15].
pub const PLAYER_NAME: u8 = 0x01;
// Monster data is a (struct {u8, u8, u8})[5].
// So add (n * 3) to this, where 0 <= n <= 4, to get the address for the n-th monster,
// then add the offset of which byte you want.
pub const MONSTERS: u8 = 0x10;
pub const MONSTER_FLAGS: u8 = 0;
pub const MONSTER_POSITION: u8 = 1;
pub const MONSTER_HP: u8 = 2;
// An 8-bit bitmask (there are eight spells).
pub const SPELL_MEMORY: u8 = 0x1f;
// A 32-bit bitmask (there are thirty-two items).
pub const IDENTIFICATION: u8 = 0x20;
// Begins a u8[4]: one byte for each timer (poison, haste, charge, protect).
pub const TIMERS: u8 = 0x24;
// Begins a u8[8].
pub const INVENTORY: u8 = 0x28;
// Same as player appearance.
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
// ??? = 0x36;
// ??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn peek(world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO: ???
0,
0x37 =>
// TODO: ???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0;
monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{}, | world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO: ???
{},
0x37 =>
// TODO: ???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
const CP437: &'static [char; 256] = &[
' ', '☺', '☻', '♥', '♦', '♣', '♠', '•', '◘', '○', '◙', '♂', '♀', '♪', '♫', '☼',
'►', '◄', '↕', '‼', '¶', '§', '▬', '↨', '↑', '↓', '→', '←', '∟', '↔', '▲', '▼',
' ', '!', '"', '#', '$', '%', '&', '\'','(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\',']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '',
'Ç', 'ü', 'é', 'â', 'ä', 'à', 'å', 'ç', 'ê', 'ë', 'è', 'ï', 'î', 'ì', 'Ä', 'Å',
'É', 'æ', 'Æ', 'ô', 'ö', 'ò', 'û', 'ù', 'ÿ', 'Ö', 'Ü', '¢', '£', '¥', '₧', 'ƒ',
'á', 'í', 'ó', 'ú', 'ñ', 'Ñ', 'ª', 'º', '¿', '⌐', '¬', '½', '¼', '¡', '«', '»',
'░', '▒', '▓', '│', '┤', '╡', '╢', '╖', '╕', '╣', '║', '╗', '╝', '╜', '╛', '┐',
'└', '┴', '┬', '├', '─', '┼', '╞', '╟', '╚', '╔', '╩', '╦', '╠', '═', '╬', '╧',
'╨', '╤', '╥', '╙', '╘', '╒', '╓', '╫', '╪', '┘', '┌', '█', '▄', '▌', '▐', '▀',
'α', 'ß', 'Γ', 'π', 'Σ', 'σ', 'µ', 'τ', 'Φ', 'Θ', 'Ω', 'δ', '∞', 'φ', 'ε', '∩',
'≡', '±', '≥', '≤', '⌠', '⌡', '÷', '≈', '°', '∙', '·', '√', 'ⁿ', '²', '■', ' ',
];
pub fn player_name(world: &World) -> String {
let mut i: u8 = PLAYER_NAME;
let mut name = String::new();
while i < 0x40 {
let c = peek(world, i);
if c == 0 { break };
name.push(CP437[c as usize]);
i += 1;
}
return name;
} |
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA => | random_line_split |
memory.rs | use std::mem::transmute;
use world::World;
use geometry::Point;
use sprite::Sprite;
use util;
// In Hex's Cellar, the player's spells manipulate an u8[40] of bytes that
// affect the world around her. This file gives a "RAM map" for that array.
// Color (3 bits = 8 bright colors) and character (5 bits, offset added to '!')
pub const PLAYER_APPEARANCE: u8 = 0x00;
// Begins a char[15].
pub const PLAYER_NAME: u8 = 0x01;
// Monster data is a (struct {u8, u8, u8})[5].
// So add (n * 3) to this, where 0 <= n <= 4, to get the address for the n-th monster,
// then add the offset of which byte you want.
pub const MONSTERS: u8 = 0x10;
pub const MONSTER_FLAGS: u8 = 0;
pub const MONSTER_POSITION: u8 = 1;
pub const MONSTER_HP: u8 = 2;
// An 8-bit bitmask (there are eight spells).
pub const SPELL_MEMORY: u8 = 0x1f;
// A 32-bit bitmask (there are thirty-two items).
pub const IDENTIFICATION: u8 = 0x20;
// Begins a u8[4]: one byte for each timer (poison, haste, charge, protect).
pub const TIMERS: u8 = 0x24;
// Begins a u8[8].
pub const INVENTORY: u8 = 0x28;
// Same as player appearance.
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
// ??? = 0x36;
// ??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn | (world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO: ???
0,
0x37 =>
// TODO: ???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0;
monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{},
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA =>
world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO: ???
{},
0x37 =>
// TODO: ???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
const CP437: &'static [char; 256] = &[
' ', '☺', '☻', '♥', '♦', '♣', '♠', '•', '◘', '○', '◙', '♂', '♀', '♪', '♫', '☼',
'►', '◄', '↕', '‼', '¶', '§', '▬', '↨', '↑', '↓', '→', '←', '∟', '↔', '▲', '▼',
' ', '!', '"', '#', '$', '%', '&', '\'','(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\',']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '',
'Ç', 'ü', 'é', 'â', 'ä', 'à', 'å', 'ç', 'ê', 'ë', 'è', 'ï', 'î', 'ì', 'Ä', 'Å',
'É', 'æ', 'Æ', 'ô', 'ö', 'ò', 'û', 'ù', 'ÿ', 'Ö', 'Ü', '¢', '£', '¥', '₧', 'ƒ',
'á', 'í', 'ó', 'ú', 'ñ', 'Ñ', 'ª', 'º', '¿', '⌐', '¬', '½', '¼', '¡', '«', '»',
'░', '▒', '▓', '│', '┤', '╡', '╢', '╖', '╕', '╣', '║', '╗', '╝', '╜', '╛', '┐',
'└', '┴', '┬', '├', '─', '┼', '╞', '╟', '╚', '╔', '╩', '╦', '╠', '═', '╬', '╧',
'╨', '╤', '╥', '╙', '╘', '╒', '╓', '╫', '╪', '┘', '┌', '█', '▄', '▌', '▐', '▀',
'α', 'ß', 'Γ', 'π', 'Σ', 'σ', 'µ', 'τ', 'Φ', 'Θ', 'Ω', 'δ', '∞', 'φ', 'ε', '∩',
'≡', '±', '≥', '≤', '⌠', '⌡', '÷', '≈', '°', '∙', '·', '√', 'ⁿ', '²', '■', ' ',
];
pub fn player_name(world: &World) -> String {
let mut i: u8 = PLAYER_NAME;
let mut name = String::new();
while i < 0x40 {
let c = peek(world, i);
if c == 0 { break };
name.push(CP437[c as usize]);
i += 1;
}
return name;
}
| peek | identifier_name |
memory.rs | use std::mem::transmute;
use world::World;
use geometry::Point;
use sprite::Sprite;
use util;
// In Hex's Cellar, the player's spells manipulate an u8[40] of bytes that
// affect the world around her. This file gives a "RAM map" for that array.
// Color (3 bits = 8 bright colors) and character (5 bits, offset added to '!')
pub const PLAYER_APPEARANCE: u8 = 0x00;
// Begins a char[15].
pub const PLAYER_NAME: u8 = 0x01;
// Monster data is a (struct {u8, u8, u8})[5].
// So add (n * 3) to this, where 0 <= n <= 4, to get the address for the n-th monster,
// then add the offset of which byte you want.
pub const MONSTERS: u8 = 0x10;
pub const MONSTER_FLAGS: u8 = 0;
pub const MONSTER_POSITION: u8 = 1;
pub const MONSTER_HP: u8 = 2;
// An 8-bit bitmask (there are eight spells).
pub const SPELL_MEMORY: u8 = 0x1f;
// A 32-bit bitmask (there are thirty-two items).
pub const IDENTIFICATION: u8 = 0x20;
// Begins a u8[4]: one byte for each timer (poison, haste, charge, protect).
pub const TIMERS: u8 = 0x24;
// Begins a u8[8].
pub const INVENTORY: u8 = 0x28;
// Same as player appearance.
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
// ??? = 0x36;
// ??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn peek(world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO: ???
0,
0x37 =>
// TODO: ???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) |
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
const CP437: &'static [char; 256] = &[
' ', '☺', '☻', '♥', '♦', '♣', '♠', '•', '◘', '○', '◙', '♂', '♀', '♪', '♫', '☼',
'►', '◄', '↕', '‼', '¶', '§', '▬', '↨', '↑', '↓', '→', '←', '∟', '↔', '▲', '▼',
' ', '!', '"', '#', '$', '%', '&', '\'','(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\',']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '',
'Ç', 'ü', 'é', 'â', 'ä', 'à', 'å', 'ç', 'ê', 'ë', 'è', 'ï', 'î', 'ì', 'Ä', 'Å',
'É', 'æ', 'Æ', 'ô', 'ö', 'ò', 'û', 'ù', 'ÿ', 'Ö', 'Ü', '¢', '£', '¥', '₧', 'ƒ',
'á', 'í', 'ó', 'ú', 'ñ', 'Ñ', 'ª', 'º', '¿', '⌐', '¬', '½', '¼', '¡', '«', '»',
'░', '▒', '▓', '│', '┤', '╡', '╢', '╖', '╕', '╣', '║', '╗', '╝', '╜', '╛', '┐',
'└', '┴', '┬', '├', '─', '┼', '╞', '╟', '╚', '╔', '╩', '╦', '╠', '═', '╬', '╧',
'╨', '╤', '╥', '╙', '╘', '╒', '╓', '╫', '╪', '┘', '┌', '█', '▄', '▌', '▐', '▀',
'α', 'ß', 'Γ', 'π', 'Σ', 'σ', 'µ', 'τ', 'Φ', 'Θ', 'Ω', 'δ', '∞', 'φ', 'ε', '∩',
'≡', '±', '≥', '≤', '⌠', '⌡', '÷', '≈', '°', '∙', '·', '√', 'ⁿ', '²', '■', ' ',
];
pub fn player_name(world: &World) -> String {
let mut i: u8 = PLAYER_NAME;
let mut name = String::new();
while i < 0x40 {
let c = peek(world, i);
if c == 0 { break };
name.push(CP437[c as usize]);
i += 1;
}
return name;
}
| {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0;
monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{},
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA =>
world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO: ???
{},
0x37 =>
// TODO: ???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
} | identifier_body |
client.rs | use crate::parser::*;
use crate::*;
use ::reqwest::blocking as reqwest;
use itertools::*;
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Range;
use std::time::SystemTime;
use std::sync::{Arc, Mutex};
use chrono::{Utc, Local, DateTime, NaiveDateTime};
lazy_static! {
static ref last_send: Arc<Mutex<Option<DateTime<Utc>>>> =
Arc::new(Mutex::new(None));
}
#[derive(Debug, Clone)]
pub struct Response {
pub stage: i32,
pub info: Info,
pub state: State,
}
fn response_to_json(x: &Response) -> String {
map_to_json(vec![
("stage", format!("{}", x.stage)),
("info", info_to_json(&x.info)),
("state", state_to_json(&x.state)),
])
}
#[derive(Debug, Clone)]
pub struct Info {
pub deadline: i32,
pub role: i32,
pub ability: Ability,
pub range: Range<i32>,
pub opponent_params: Params,
}
fn info_to_json(x: &Info) -> String {
map_to_json(vec![
("deadline", format!("{}", x.deadline)),
("role", format!("{}", x.role)),
("opponent_params", params_to_json(&x.opponent_params)),
])
}
#[derive(Debug, Clone)]
pub struct Ability {
pub potential: i32,
pub max_accelarate: i32,
pub max_heat: i32,
}
#[derive(Debug, Clone)]
pub struct State {
pub tick: i32,
pub range: Range<i32>, // 侵入可能エリアの x,y の絶対値の範囲
pub ships: Vec<Ship>,
}
fn state_to_json(x: &State) -> String {
let mut ships = Vec::new();
for s in &x.ships {
ships.push(ship_to_json(&s));
}
map_to_json(vec![
("tick", format!("{}", x.tick)),
("ships", format!("[{}]", ships.join(","))),
])
}
#[derive(Debug, Clone)]
pub struct Ship {
pub role: i32,
pub id: i32,
pub pos: (i32, i32),
pub v: (i32, i32),
pub status: Params,
pub heat: i32,
pub max_heat: i32,
pub max_accelarate: i32,
pub commands: Vec<Command>,
}
fn ship_to_json(x: &Ship) -> String {
let mut commands = Vec::new();
for c in &x.commands {
commands.push(command_to_json(&c));
}
map_to_json(vec![
("role", format!("{}", x.role)),
("x", format!("{}", x.pos.0)),
("y", format!("{}", x.pos.1)),
("vx", format!("{}", x.v.0)),
("vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
id, params_to_json(¶ms)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &str) {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
} else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&form | Command]) -> Response {
let resp = self.send(&format!(
"[4, {}, [{}]]",
self.player_key,
cs.iter().join(", ")
));
let resp = parse(resp);
self.gui("RESP", &response_to_json(&resp));
return resp;
}
}
pub fn get_num(a: &E) -> i32 {
if let E::Num(a) = a {
*a as i32
} else {
panic!("not number");
}
}
pub fn get_pair(a: &E) -> (i32, i32) {
if let E::Pair(a, b) = a {
(get_num(a), get_num(b))
} else {
panic!("not pair");
}
}
pub fn parse(e: E) -> Response {
let a = get_list(&e).unwrap();
assert_eq!(a.len(), 4);
assert_eq!(get_num(&a[0]), 1);
let stage = get_num(&a[1]);
let info = get_list(&a[2]).unwrap();
let deadline = get_num(&info[0]);
let role = get_num(&info[1]);
let ability = get_list(&info[2])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let ability = Ability {
potential: ability[0],
max_heat: ability[1],
max_accelarate: ability[2],
};
let range = get_list(&info[3])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let range = range[0]..range[1];
let params = get_list(&info[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let opponent_params = if params.len() != 4 {
Params {
energy: -1,
power: -1,
cool: -1,
life: -1,
}
} else {
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
}
};
let state = get_list(&a[3]).unwrap();
let (tick, strange, ships) = if state.len() > 0 {
let tick = get_num(&state[0]);
let strange = get_list(&state[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<i32>>();
let strange = strange[0]..strange[1];
let ships = get_list(&state[2])
.unwrap()
.into_iter()
.map(|a| {
let tmp = get_list(&a).unwrap();
let s = get_list(&tmp[0]).unwrap();
let commands = get_list(&tmp[1]).unwrap();
let role = get_num(&s[0]);
let id = get_num(&s[1]); // shipId
let pos = get_pair(&s[2]);
let v = get_pair(&s[3]);
let status = get_list(&s[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let status = Params {
energy: status[0],
power: status[1],
cool: status[2],
life: status[3],
};
let heat = get_num(&s[5]);
let max_heat = get_num(&s[6]);
let max_accelarate = get_num(&s[7]);
// [1, 1, [256, 1, [448, 2, 128], [16, 128], []], [1, [16, 128], [[[1, 0, <34, -46>, <0, 2>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]], [[0, 1, <-34, 48>, <0, 0>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]]]]]
// [src/bin/app.rs:177] &commands = [
// Pair(
// Num(
// 0,
// ),
// Pair(
// Pair(
// Num(
// 0,
// ),
// Num(
// -1,
// ),
// ),
// Nil,
// ),
// ),
// ]
let commands = commands.into_iter().map(|e| e.as_ref().into()).collect();
Ship {
role,
id,
pos,
v,
status,
heat,
max_heat,
max_accelarate,
commands,
}
})
.collect();
(tick, strange, ships)
} else {
(0, 0..0, vec![])
};
Response {
stage,
info: Info {
deadline,
role,
ability,
range,
opponent_params,
},
state: State {
tick,
range: strange,
ships,
},
}
}
| at!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[ | identifier_body |
client.rs | use crate::parser::*;
use crate::*;
use ::reqwest::blocking as reqwest;
use itertools::*;
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Range;
use std::time::SystemTime;
use std::sync::{Arc, Mutex};
use chrono::{Utc, Local, DateTime, NaiveDateTime};
lazy_static! {
static ref last_send: Arc<Mutex<Option<DateTime<Utc>>>> =
Arc::new(Mutex::new(None));
}
#[derive(Debug, Clone)]
pub struct Response {
pub stage: i32,
pub info: Info,
pub state: State,
}
fn response_to_json(x: &Response) -> String {
map_to_json(vec![
("stage", format!("{}", x.stage)),
("info", info_to_json(&x.info)),
("state", state_to_json(&x.state)),
])
}
#[derive(Debug, Clone)]
pub struct Info {
pub deadline: i32,
pub role: i32,
pub ability: Ability,
pub range: Range<i32>,
pub opponent_params: Params,
}
fn info_to_json(x: &Info) -> String {
map_to_json(vec![
("deadline", format!("{}", x.deadline)),
("role", format!("{}", x.role)),
("opponent_params", params_to_json(&x.opponent_params)),
])
}
#[derive(Debug, Clone)]
pub struct Ability {
pub potential: i32,
pub max_accelarate: i32,
pub max_heat: i32,
}
#[derive(Debug, Clone)]
pub struct State {
pub tick: i32,
pub range: Range<i32>, // 侵入可能エリアの x,y の絶対値の範囲
pub ships: Vec<Ship>,
}
fn state_to_json(x: &State) -> String {
let mut ships = Vec::new();
for s in &x.ships {
ships.push(ship_to_json(&s));
}
map_to_json(vec![
("tick", format!("{}", x.tick)),
("ships", format!("[{}]", ships.join(","))),
])
}
#[derive(Debug, Clone)]
pub struct Ship {
pub role: i32,
pub id: i32,
pub pos: (i32, i32),
pub v: (i32, i32),
pub status: Params,
pub heat: i32,
pub max_heat: i32,
pub max_accelarate: i32,
pub commands: Vec<Command>,
}
fn ship_to_json(x: &Ship) -> String {
let mut commands = Vec::new();
for c in &x.commands {
commands.push(command_to_json(&c));
}
map_to_json(vec![
("role", format!("{}", x.role)),
("x", format!("{}", x.pos.0)),
("y", format!("{}", x.pos.1)),
("vx", format!("{}", x.v.0)),
("vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
id, params_to_json(¶ms)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &s | {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
} else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&format!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[Command]) -> Response {
let resp = self.send(&format!(
"[4, {}, [{}]]",
self.player_key,
cs.iter().join(", ")
));
let resp = parse(resp);
self.gui("RESP", &response_to_json(&resp));
return resp;
}
}
pub fn get_num(a: &E) -> i32 {
if let E::Num(a) = a {
*a as i32
} else {
panic!("not number");
}
}
pub fn get_pair(a: &E) -> (i32, i32) {
if let E::Pair(a, b) = a {
(get_num(a), get_num(b))
} else {
panic!("not pair");
}
}
pub fn parse(e: E) -> Response {
let a = get_list(&e).unwrap();
assert_eq!(a.len(), 4);
assert_eq!(get_num(&a[0]), 1);
let stage = get_num(&a[1]);
let info = get_list(&a[2]).unwrap();
let deadline = get_num(&info[0]);
let role = get_num(&info[1]);
let ability = get_list(&info[2])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let ability = Ability {
potential: ability[0],
max_heat: ability[1],
max_accelarate: ability[2],
};
let range = get_list(&info[3])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let range = range[0]..range[1];
let params = get_list(&info[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let opponent_params = if params.len() != 4 {
Params {
energy: -1,
power: -1,
cool: -1,
life: -1,
}
} else {
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
}
};
let state = get_list(&a[3]).unwrap();
let (tick, strange, ships) = if state.len() > 0 {
let tick = get_num(&state[0]);
let strange = get_list(&state[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<i32>>();
let strange = strange[0]..strange[1];
let ships = get_list(&state[2])
.unwrap()
.into_iter()
.map(|a| {
let tmp = get_list(&a).unwrap();
let s = get_list(&tmp[0]).unwrap();
let commands = get_list(&tmp[1]).unwrap();
let role = get_num(&s[0]);
let id = get_num(&s[1]); // shipId
let pos = get_pair(&s[2]);
let v = get_pair(&s[3]);
let status = get_list(&s[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let status = Params {
energy: status[0],
power: status[1],
cool: status[2],
life: status[3],
};
let heat = get_num(&s[5]);
let max_heat = get_num(&s[6]);
let max_accelarate = get_num(&s[7]);
// [1, 1, [256, 1, [448, 2, 128], [16, 128], []], [1, [16, 128], [[[1, 0, <34, -46>, <0, 2>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]], [[0, 1, <-34, 48>, <0, 0>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]]]]]
// [src/bin/app.rs:177] &commands = [
// Pair(
// Num(
// 0,
// ),
// Pair(
// Pair(
// Num(
// 0,
// ),
// Num(
// -1,
// ),
// ),
// Nil,
// ),
// ),
// ]
let commands = commands.into_iter().map(|e| e.as_ref().into()).collect();
Ship {
role,
id,
pos,
v,
status,
heat,
max_heat,
max_accelarate,
commands,
}
})
.collect();
(tick, strange, ships)
} else {
(0, 0..0, vec![])
};
Response {
stage,
info: Info {
deadline,
role,
ability,
range,
opponent_params,
},
state: State {
tick,
range: strange,
ships,
},
}
}
| tr) | identifier_name |
client.rs | use crate::parser::*;
use crate::*;
use ::reqwest::blocking as reqwest;
use itertools::*;
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Range;
use std::time::SystemTime;
use std::sync::{Arc, Mutex};
use chrono::{Utc, Local, DateTime, NaiveDateTime};
lazy_static! {
static ref last_send: Arc<Mutex<Option<DateTime<Utc>>>> =
Arc::new(Mutex::new(None));
}
#[derive(Debug, Clone)]
pub struct Response {
pub stage: i32,
pub info: Info,
pub state: State,
}
fn response_to_json(x: &Response) -> String {
map_to_json(vec![
("stage", format!("{}", x.stage)),
("info", info_to_json(&x.info)),
("state", state_to_json(&x.state)),
])
}
#[derive(Debug, Clone)]
pub struct Info {
pub deadline: i32,
pub role: i32,
pub ability: Ability,
pub range: Range<i32>,
pub opponent_params: Params,
}
fn info_to_json(x: &Info) -> String {
map_to_json(vec![
("deadline", format!("{}", x.deadline)),
("role", format!("{}", x.role)),
("opponent_params", params_to_json(&x.opponent_params)),
])
}
#[derive(Debug, Clone)]
pub struct Ability {
pub potential: i32,
pub max_accelarate: i32,
pub max_heat: i32,
}
#[derive(Debug, Clone)]
pub struct State {
pub tick: i32,
pub range: Range<i32>, // 侵入可能エリアの x,y の絶対値の範囲
pub ships: Vec<Ship>,
}
fn state_to_json(x: &State) -> String {
let mut ships = Vec::new();
for s in &x.ships {
ships.push(ship_to_json(&s));
}
map_to_json(vec![
("tick", format!("{}", x.tick)),
("ships", format!("[{}]", ships.join(","))),
])
}
#[derive(Debug, Clone)]
pub struct Ship {
pub role: i32,
pub id: i32,
pub pos: (i32, i32),
pub v: (i32, i32),
pub status: Params,
pub heat: i32,
pub max_heat: i32,
pub max_accelarate: i32,
pub commands: Vec<Command>,
}
fn ship_to_json(x: &Ship) -> String {
let mut commands = Vec::new();
for c in &x.commands {
commands.push(command_to_json(&c));
}
map_to_json(vec![
("role", format!("{}", x.role)),
("x", format!("{}", x.pos.0)),
("y", format!("{}", x.pos.1)),
("vx", format!("{}", x.v.0)),
("vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
id, params_to_json(¶ms)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &str) {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
} else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&format!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[Command]) -> Response {
let resp = self.send(&format!(
"[4, {}, [{}]]",
self.player_key,
cs.iter().join(", ")
));
let resp = parse(resp);
self.gui("RESP", &response_to_json(&resp));
return resp;
}
}
pub fn get_num(a: &E) -> i32 {
if let E::Num(a) = a {
*a as i32
} else {
panic!("not number");
}
}
pub fn get_pair(a: &E) -> (i32, i32) {
if let E::Pair(a, b) = a {
(get_num(a), get_num(b))
} else {
panic!("not pair");
}
}
pub fn parse(e: E) -> Response {
let a = get_list(&e).unwrap();
assert_eq!(a.len(), 4);
assert_eq!(get_num(&a[0]), 1);
let stage = get_num(&a[1]);
let info = get_list(&a[2]).unwrap();
let deadline = get_num(&info[0]);
let role = get_num(&info[1]);
let ability = get_list(&info[2])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let ability = Ability {
potential: ability[0],
max_heat: ability[1],
max_accelarate: ability[2],
};
let range = get_list(&info[3])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let range = range[0]..range[1];
let params = get_list(&info[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let opponent_params = if params.len() != 4 {
Params {
energy: -1,
power: -1,
cool: -1,
life: -1,
}
} else {
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
}
};
let state = get_list(&a[3]).unwrap();
let (tick, strange, ships) = if state.len() > 0 {
let tick = get_num(&state[0]);
let strange = get_list(&state[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<i32>>();
let strange = strange[0]..strange[1];
let ships = get_list(&state[2])
.unwrap()
.into_iter()
.map(|a| {
let tmp = get_list(&a).unwrap();
let s = get_list(&tmp[0]).unwrap();
let commands = get_list(&tmp[1]).unwrap();
let role = get_num(&s[0]);
let id = get_num(&s[1]); // shipId
let pos = get_pair(&s[2]);
let v = get_pair(&s[3]);
let status = get_list(&s[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let status = Params {
energy: status[0],
power: status[1],
cool: status[2],
life: status[3],
};
let heat = get_num(&s[5]);
let max_heat = get_num(&s[6]); | // [src/bin/app.rs:177] &commands = [
// Pair(
// Num(
// 0,
// ),
// Pair(
// Pair(
// Num(
// 0,
// ),
// Num(
// -1,
// ),
// ),
// Nil,
// ),
// ),
// ]
let commands = commands.into_iter().map(|e| e.as_ref().into()).collect();
Ship {
role,
id,
pos,
v,
status,
heat,
max_heat,
max_accelarate,
commands,
}
})
.collect();
(tick, strange, ships)
} else {
(0, 0..0, vec![])
};
Response {
stage,
info: Info {
deadline,
role,
ability,
range,
opponent_params,
},
state: State {
tick,
range: strange,
ships,
},
}
} | let max_accelarate = get_num(&s[7]);
// [1, 1, [256, 1, [448, 2, 128], [16, 128], []], [1, [16, 128], [[[1, 0, <34, -46>, <0, 2>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]], [[0, 1, <-34, 48>, <0, 0>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]]]]] | random_line_split |
xds.go | /*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package xds provides a transport credentials implementation where the
// security configuration is pushed by a management server using xDS APIs.
//
// Experimental
//
// Notice: All APIs in this package are EXPERIMENTAL and may be removed in a
// later release.
package xds
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"sync"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
credinternal "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/resolver"
)
func init() {
internal.GetXDSHandshakeInfoForTesting = getHandshakeInfo
}
// ClientOptions contains parameters to configure a new client-side xDS
// credentials implementation.
type ClientOptions struct {
// FallbackCreds specifies the fallback credentials to be used when either
// the `xds` scheme is not used in the user's dial target or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func | (opts ClientOptions) (credentials.TransportCredentials, error) {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
}
// credsImpl is an implementation of the credentials.TransportCredentials
// interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
}
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes does
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contains the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage.
// 5. A `VerifyPeerCertificate` function which performs normal peer
// cert verification using configured roots, and the custom SAN checks.
cfg, err := hi.makeTLSConfig(ctx)
if err != nil {
return nil, nil, err
}
cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
// Parse all raw certificates presented by the peer.
var certs []*x509.Certificate
for _, rc := range rawCerts {
cert, err := x509.ParseCertificate(rc)
if err != nil {
return err
}
certs = append(certs, cert)
}
// Build the intermediates list and verify that the leaf certificate
// is signed by one of the root certificates.
intermediates := x509.NewCertPool()
for _, cert := range certs[1:] {
intermediates.AddCert(cert)
}
opts := x509.VerifyOptions{
Roots: cfg.RootCAs,
Intermediates: intermediates,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
if _, err := certs[0].Verify(opts); err != nil {
return err
}
// The SANs sent by the MeshCA are encoded as SPIFFE IDs. We need to
// only look at the SANs on the leaf cert.
if !hi.matchingSANExists(certs[0]) {
return fmt.Errorf("SANs received in leaf certificate %+v does not match any of the accepted SANs", certs[0])
}
return nil
}
// Perform the TLS handshake with the tls.Config that we have. We run the
// actual Handshake() function in a goroutine because we need to respect the
// deadline specified on the passed in context, and we need a way to cancel
// the handshake if the context is cancelled.
conn := tls.Client(rawConn, cfg)
errCh := make(chan error, 1)
go func() {
errCh <- conn.Handshake()
close(errCh)
}()
select {
case err := <-errCh:
if err != nil {
conn.Close()
return nil, nil, err
}
case <-ctx.Done():
conn.Close()
return nil, nil, ctx.Err()
}
info := credentials.TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: credentials.CommonAuthInfo{
SecurityLevel: credentials.PrivacyAndIntegrity,
},
SPIFFEID: credinternal.SPIFFEIDFromState(conn.ConnectionState()),
}
return credinternal.WrapSyscallConn(rawConn, conn), info, nil
}
// ServerHandshake performs the TLS handshake on the server-side.
func (c *credsImpl) ServerHandshake(net.Conn) (net.Conn, credentials.AuthInfo, error) {
if c.isClient {
return nil, nil, errors.New("ServerHandshake is not supported for client credentials")
}
// TODO(easwars): Implement along with server side xDS implementation.
return nil, nil, errors.New("not implemented")
}
// Info provides the ProtocolInfo of this TransportCredentials.
func (c *credsImpl) Info() credentials.ProtocolInfo {
return credentials.ProtocolInfo{SecurityProtocol: "tls"}
}
// Clone makes a copy of this TransportCredentials.
func (c *credsImpl) Clone() credentials.TransportCredentials {
clone := *c
return &clone
}
func (c *credsImpl) OverrideServerName(_ string) error {
return errors.New("serverName for peer validation must be configured as a list of acceptable SANs")
}
// UsesXDS returns true if c uses xDS to fetch security configuration
// used at handshake time, and false otherwise.
func (c *credsImpl) UsesXDS() bool {
return true
}
| NewClientCredentials | identifier_name |
xds.go | /*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package xds provides a transport credentials implementation where the
// security configuration is pushed by a management server using xDS APIs.
//
// Experimental
//
// Notice: All APIs in this package are EXPERIMENTAL and may be removed in a
// later release.
package xds
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"sync"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
credinternal "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/resolver"
)
func init() {
internal.GetXDSHandshakeInfoForTesting = getHandshakeInfo
}
// ClientOptions contains parameters to configure a new client-side xDS
// credentials implementation.
type ClientOptions struct {
// FallbackCreds specifies the fallback credentials to be used when either
// the `xds` scheme is not used in the user's dial target or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func NewClientCredentials(opts ClientOptions) (credentials.TransportCredentials, error) {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
}
// credsImpl is an implementation of the credentials.TransportCredentials
// interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
}
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil |
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes does
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contains the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage.
// 5. A `VerifyPeerCertificate` function which performs normal peer
// cert verification using configured roots, and the custom SAN checks.
cfg, err := hi.makeTLSConfig(ctx)
if err != nil {
return nil, nil, err
}
cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
// Parse all raw certificates presented by the peer.
var certs []*x509.Certificate
for _, rc := range rawCerts {
cert, err := x509.ParseCertificate(rc)
if err != nil {
return err
}
certs = append(certs, cert)
}
// Build the intermediates list and verify that the leaf certificate
// is signed by one of the root certificates.
intermediates := x509.NewCertPool()
for _, cert := range certs[1:] {
intermediates.AddCert(cert)
}
opts := x509.VerifyOptions{
Roots: cfg.RootCAs,
Intermediates: intermediates,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
if _, err := certs[0].Verify(opts); err != nil {
return err
}
// The SANs sent by the MeshCA are encoded as SPIFFE IDs. We need to
// only look at the SANs on the leaf cert.
if !hi.matchingSANExists(certs[0]) {
return fmt.Errorf("SANs received in leaf certificate %+v does not match any of the accepted SANs", certs[0])
}
return nil
}
// Perform the TLS handshake with the tls.Config that we have. We run the
// actual Handshake() function in a goroutine because we need to respect the
// deadline specified on the passed in context, and we need a way to cancel
// the handshake if the context is cancelled.
conn := tls.Client(rawConn, cfg)
errCh := make(chan error, 1)
go func() {
errCh <- conn.Handshake()
close(errCh)
}()
select {
case err := <-errCh:
if err != nil {
conn.Close()
return nil, nil, err
}
case <-ctx.Done():
conn.Close()
return nil, nil, ctx.Err()
}
info := credentials.TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: credentials.CommonAuthInfo{
SecurityLevel: credentials.PrivacyAndIntegrity,
},
SPIFFEID: credinternal.SPIFFEIDFromState(conn.ConnectionState()),
}
return credinternal.WrapSyscallConn(rawConn, conn), info, nil
}
// ServerHandshake performs the TLS handshake on the server-side.
func (c *credsImpl) ServerHandshake(net.Conn) (net.Conn, credentials.AuthInfo, error) {
if c.isClient {
return nil, nil, errors.New("ServerHandshake is not supported for client credentials")
}
// TODO(easwars): Implement along with server side xDS implementation.
return nil, nil, errors.New("not implemented")
}
// Info provides the ProtocolInfo of this TransportCredentials.
func (c *credsImpl) Info() credentials.ProtocolInfo {
return credentials.ProtocolInfo{SecurityProtocol: "tls"}
}
// Clone makes a copy of this TransportCredentials.
func (c *credsImpl) Clone() credentials.TransportCredentials {
clone := *c
return &clone
}
func (c *credsImpl) OverrideServerName(_ string) error {
return errors.New("serverName for peer validation must be configured as a list of acceptable SANs")
}
// UsesXDS returns true if c uses xDS to fetch security configuration
// used at handshake time, and false otherwise.
func (c *credsImpl) UsesXDS() bool {
return true
}
| {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
} | conditional_block |
xds.go | /*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package xds provides a transport credentials implementation where the
// security configuration is pushed by a management server using xDS APIs.
//
// Experimental
//
// Notice: All APIs in this package are EXPERIMENTAL and may be removed in a
// later release.
package xds
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"sync"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
credinternal "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/resolver"
)
func init() {
internal.GetXDSHandshakeInfoForTesting = getHandshakeInfo
}
// ClientOptions contains parameters to configure a new client-side xDS
// credentials implementation.
type ClientOptions struct {
// FallbackCreds specifies the fallback credentials to be used when either
// the `xds` scheme is not used in the user's dial target or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func NewClientCredentials(opts ClientOptions) (credentials.TransportCredentials, error) {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
}
// credsImpl is an implementation of the credentials.TransportCredentials |
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes does
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contains the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage.
// 5. A `VerifyPeerCertificate` function which performs normal peer
// cert verification using configured roots, and the custom SAN checks.
cfg, err := hi.makeTLSConfig(ctx)
if err != nil {
return nil, nil, err
}
cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
// Parse all raw certificates presented by the peer.
var certs []*x509.Certificate
for _, rc := range rawCerts {
cert, err := x509.ParseCertificate(rc)
if err != nil {
return err
}
certs = append(certs, cert)
}
// Build the intermediates list and verify that the leaf certificate
// is signed by one of the root certificates.
intermediates := x509.NewCertPool()
for _, cert := range certs[1:] {
intermediates.AddCert(cert)
}
opts := x509.VerifyOptions{
Roots: cfg.RootCAs,
Intermediates: intermediates,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
if _, err := certs[0].Verify(opts); err != nil {
return err
}
// The SANs sent by the MeshCA are encoded as SPIFFE IDs. We need to
// only look at the SANs on the leaf cert.
if !hi.matchingSANExists(certs[0]) {
return fmt.Errorf("SANs received in leaf certificate %+v does not match any of the accepted SANs", certs[0])
}
return nil
}
// Perform the TLS handshake with the tls.Config that we have. We run the
// actual Handshake() function in a goroutine because we need to respect the
// deadline specified on the passed in context, and we need a way to cancel
// the handshake if the context is cancelled.
conn := tls.Client(rawConn, cfg)
errCh := make(chan error, 1)
go func() {
errCh <- conn.Handshake()
close(errCh)
}()
select {
case err := <-errCh:
if err != nil {
conn.Close()
return nil, nil, err
}
case <-ctx.Done():
conn.Close()
return nil, nil, ctx.Err()
}
info := credentials.TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: credentials.CommonAuthInfo{
SecurityLevel: credentials.PrivacyAndIntegrity,
},
SPIFFEID: credinternal.SPIFFEIDFromState(conn.ConnectionState()),
}
return credinternal.WrapSyscallConn(rawConn, conn), info, nil
}
// ServerHandshake performs the TLS handshake on the server-side.
func (c *credsImpl) ServerHandshake(net.Conn) (net.Conn, credentials.AuthInfo, error) {
if c.isClient {
return nil, nil, errors.New("ServerHandshake is not supported for client credentials")
}
// TODO(easwars): Implement along with server side xDS implementation.
return nil, nil, errors.New("not implemented")
}
// Info provides the ProtocolInfo of this TransportCredentials.
func (c *credsImpl) Info() credentials.ProtocolInfo {
return credentials.ProtocolInfo{SecurityProtocol: "tls"}
}
// Clone makes a copy of this TransportCredentials.
func (c *credsImpl) Clone() credentials.TransportCredentials {
clone := *c
return &clone
}
func (c *credsImpl) OverrideServerName(_ string) error {
return errors.New("serverName for peer validation must be configured as a list of acceptable SANs")
}
// UsesXDS returns true if c uses xDS to fetch security configuration
// used at handshake time, and false otherwise.
func (c *credsImpl) UsesXDS() bool {
return true
} | // interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
} | random_line_split |
xds.go | /*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package xds provides a transport credentials implementation where the
// security configuration is pushed by a management server using xDS APIs.
//
// Experimental
//
// Notice: All APIs in this package are EXPERIMENTAL and may be removed in a
// later release.
package xds
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"sync"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
credinternal "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/resolver"
)
func init() {
internal.GetXDSHandshakeInfoForTesting = getHandshakeInfo
}
// ClientOptions contains parameters to configure a new client-side xDS
// credentials implementation.
type ClientOptions struct {
// FallbackCreds specifies the fallback credentials to be used when either
// the `xds` scheme is not used in the user's dial target or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func NewClientCredentials(opts ClientOptions) (credentials.TransportCredentials, error) |
// credsImpl is an implementation of the credentials.TransportCredentials
// interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
}
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes does
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contains the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage.
// 5. A `VerifyPeerCertificate` function which performs normal peer
// cert verification using configured roots, and the custom SAN checks.
cfg, err := hi.makeTLSConfig(ctx)
if err != nil {
return nil, nil, err
}
cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
// Parse all raw certificates presented by the peer.
var certs []*x509.Certificate
for _, rc := range rawCerts {
cert, err := x509.ParseCertificate(rc)
if err != nil {
return err
}
certs = append(certs, cert)
}
// Build the intermediates list and verify that the leaf certificate
// is signed by one of the root certificates.
intermediates := x509.NewCertPool()
for _, cert := range certs[1:] {
intermediates.AddCert(cert)
}
opts := x509.VerifyOptions{
Roots: cfg.RootCAs,
Intermediates: intermediates,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
if _, err := certs[0].Verify(opts); err != nil {
return err
}
// The SANs sent by the MeshCA are encoded as SPIFFE IDs. We need to
// only look at the SANs on the leaf cert.
if !hi.matchingSANExists(certs[0]) {
return fmt.Errorf("SANs received in leaf certificate %+v does not match any of the accepted SANs", certs[0])
}
return nil
}
// Perform the TLS handshake with the tls.Config that we have. We run the
// actual Handshake() function in a goroutine because we need to respect the
// deadline specified on the passed in context, and we need a way to cancel
// the handshake if the context is cancelled.
conn := tls.Client(rawConn, cfg)
errCh := make(chan error, 1)
go func() {
errCh <- conn.Handshake()
close(errCh)
}()
select {
case err := <-errCh:
if err != nil {
conn.Close()
return nil, nil, err
}
case <-ctx.Done():
conn.Close()
return nil, nil, ctx.Err()
}
info := credentials.TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: credentials.CommonAuthInfo{
SecurityLevel: credentials.PrivacyAndIntegrity,
},
SPIFFEID: credinternal.SPIFFEIDFromState(conn.ConnectionState()),
}
return credinternal.WrapSyscallConn(rawConn, conn), info, nil
}
// ServerHandshake performs the TLS handshake on the server-side.
func (c *credsImpl) ServerHandshake(net.Conn) (net.Conn, credentials.AuthInfo, error) {
if c.isClient {
return nil, nil, errors.New("ServerHandshake is not supported for client credentials")
}
// TODO(easwars): Implement along with server side xDS implementation.
return nil, nil, errors.New("not implemented")
}
// Info provides the ProtocolInfo of this TransportCredentials.
func (c *credsImpl) Info() credentials.ProtocolInfo {
return credentials.ProtocolInfo{SecurityProtocol: "tls"}
}
// Clone makes a copy of this TransportCredentials.
func (c *credsImpl) Clone() credentials.TransportCredentials {
clone := *c
return &clone
}
func (c *credsImpl) OverrideServerName(_ string) error {
return errors.New("serverName for peer validation must be configured as a list of acceptable SANs")
}
// UsesXDS returns true if c uses xDS to fetch security configuration
// used at handshake time, and false otherwise.
func (c *credsImpl) UsesXDS() bool {
return true
}
| {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
} | identifier_body |
oscillator.py | import numpy as np
import sys
import os
import sys
sys.path.append('src')
from scipy.constants import c, pi
from joblib import Parallel, delayed
from mpi4py.futures import MPIPoolExecutor
from mpi4py import MPI
from scipy.fftpack import fftshift, fft
import os
import time as timeit
os.system('export FONTCONFIG_PATH=/etc/fonts')
from functions import *
from time import time, sleep
import pickle
@profile
def oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P0_p1, P0_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, dAdzmm, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1, fopa):
mode_names = ['LP01a']
u = np.zeros(sim_wind.t.shape, dtype='complex128')
U = np.zeros(sim_wind.fv.shape, dtype='complex128') #
T0_p = TFWHM_p / 2 / (np.log(2))**0.5
T0_s = TFWHM_s / 2 / (np.log(2))**0.5
noise_new = noise_obj.noise_func(int_fwm)
u = noise_new
woff1 = (p_pos[1] + (int_fwm.nt) // 2) * 2 * pi * sim_wind.df[p_pos[0]]
u[p_pos[0], :] += (P0_p1)**0.5 * np.exp(1j *
(woff1) * sim_wind.t[p_pos[0]])
woff2 = -(s_pos[1] - (int_fwm.nt - 1) // 2) * \
2 * pi * sim_wind.df[s_pos[0]]
u[s_pos[0], :] += (P0_s)**0.5 * np.exp(-1j *
(woff2) * sim_wind.t[s_pos[0]])
U = fftshift(fft(u), axes=-1)
master_index = str(master_index)
max_rounds = arguments_determine(-1)
if fopa:
print('Fibre amplifier!')
max_rounds = 0
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, max_rounds, mode_names, master_index, '00', 'original pump', D_pic[0], plots)
U_original_pump = np.copy(U)
# Pass the original pump through the WDM1, port1 is in to the loop, port2
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
u, U = WDM_vec[0].pass_through((U, noise_new))[0]
ro = -1
t_total = 0
factors_xpm, factors_fwm,gama,tsh, w_tiled = \
dAdzmm.factors_xpm, dAdzmm.factors_fwm, dAdzmm.gama, dAdzmm.tsh, dAdzmm.w_tiled
dz,dzstep,maxerr = int_fwm.dz,int_fwm.dzstep,int_fwm.maxerr
Dop = np.ascontiguousarray(Dop/2)
factors_xpm = np.ascontiguousarray(factors_xpm)
factors_fwm = np.ascontiguousarray(factors_fwm)
gama = np.ascontiguousarray(gama)
tsh = np.ascontiguousarray(tsh)
w_tiled = np.ascontiguousarray(w_tiled)
while ro < max_rounds:
ro += 1
print('round', ro)
pulse_pos_dict = [
'round ' + str(ro) + ', ' + i for i in pulse_pos_dict_or]
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '1', pulse_pos_dict[3], D_pic[5], plots)
# Phase modulate before the Fibre
U = pm_fopa.modulate(U)
u = ifft(ifftshift(U, axes=-1))
#Pulse propagation
U, dz = pulse_propagation(u,dz,dzstep,maxerr, Dop,factors_xpm, factors_fwm, gama,tsh,w_tiled)
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '2', pulse_pos_dict[0], D_pic[2], plots)
max_noise = 10*noise_new.max()
#checks if the fft's are causing boundary condtion problems
if (U[:, 0] > max_noise).any() or (U[:, -1] > max_noise).any():
with open("error_log", "a") as myfile:
myfile.write("Pump: %5f, Seed: %5f, lamp: %5f, lams: %5f \n" % (
P0_p1, P0_s, 1e-3*c/f_p, 1e-3*c/f_s))
break
# pass through WDM2 port 2 continues and port 1 is out of the loop
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(out1, out2), (u, U) = WDM_vec[1].pass_through(
(U, noise_new))
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '3', pulse_pos_dict[3], D_pic[3], plots)
# Splice7 after WDM2 for the signal
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(u, U) = splicers_vec[2].pass_through(
(U, noise_new))[0]
#Phase modulate the oscillating signal so that to be in phase with the one coming in
U = pm_WDM1.modulate(U_original_pump, U)
# Pass again through WDM1 with the signal now
(u, U) = WDM_vec[0].pass_through(
(U_original_pump, U))[0]
################################The outbound stuff#####################
ex.exporter(index, int_fwm, sim_wind, out1, out2, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '4', pulse_pos_dict[4], D_pic[6], plots)
consolidate(ro, int_fwm,master_index, index)
return ro
def calc_P_out(U, U_original_pump, fv, t):
U = np.abs(U)**2
U_original_pump = np.abs(U_original_pump)**2
freq_band = 2
fp_id = np.where(U_original_pump == np.max(U_original_pump))[0][0]
plom = fp_id + 10
fv_id = np.where(U[plom:] == np.max(U[plom:]))[0][0]
fv_id += plom - 1
start, end = fv[fv_id] - freq_band, fv[fv_id] + freq_band
i = np.where(
np.abs(fv - start) == np.min(np.abs(fv - start)))[0][0]
j = np.where(
np.abs(fv - end) == np.min(np.abs(fv - end)))[0][0]
E_out = simps(U[i:j] * (t[1] - t[0])**2, fv[i:j])
P_out = E_out / (2 * np.abs(np.min(t)))
return P_out
@unpack_args
def formulate(index, n2, gama, alphadB, z, P_p, P_s, TFWHM_p, TFWHM_s, spl_losses, betas,
lamda_c, WDMS_pars, lamp, lams, num_cores, maxerr, ss, plots,
N, nplot, master_index, filesaves, Df_band, fr, fopa):
"------------------propagation paramaters------------------"
dzstep = z / nplot # distance per step
dz_less = 1e2
int_fwm = sim_parameters(n2, 1, alphadB)
int_fwm.general_options(maxerr, ss)
int_fwm.propagation_parameters(N, z, nplot, dz_less)
lamda = lamp * 1e-9 # central wavelength of the grid[m]
"-----------------------------f-----------------------------"
"---------------------Aeff-Qmatrixes-----------------------"
M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda_c, gama)
"----------------------------------------------------------"
"---------------------Grid&window-----------------------"
P_p_bef,P_s_bef = pre_fibre_init_power(WDMS_pars[0][0], WDMS_pars[0][1], lamp, P_p, P_s)
fv, where, f_centrals = fv_creator(
lamp, lams, lamda_c, int_fwm, betas, M, P_p_bef,P_s_bef, Df_band)
print(fv[0][1] - fv[0][0])
#print(1e-3 * c / np.array(f_centrals))
p_pos, s_pos, i_pos = where
sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
"----------------------------------------------------------"
"---------------------Loss-in-fibres-----------------------"
slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
loss = Loss(int_fwm, sim_wind, amax=0)
int_fwm.alpha = loss.atten_func_full(fv)
int_fwm.gama = np.array(
[-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
#if ss == 0:
# int_fwm.gama[:] = -1j * n2 * 2 * M * pi * (1e12 * f_centrals[3]) / (c)
int_fwm.gama[0:2] = 0
int_fwm.gama[5:] = 0
#for i in range(len(int_fwm.gama)):
# print(i, int_fwm.gama[i])
#exit()
"----------------------------------------------------------"
"--------------------Dispersion----------------------------"
Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
"----------------------------------------------------------"
"---------------------Raman Factors------------------------"
ram = Raman_factors(fr)
ram.set_raman_band(sim_wind)
"----------------------------------------------------------"
"--------------------Noise---------------------------------"
noise_obj = Noise(int_fwm, sim_wind)
"----------------------------------------------------------"
pulse_pos_dict_or = ('after propagation', "pass WDM2",
"pass WDM1 on port2 (remove pump)",
'add more pump', 'out')
keys = ['loading_data/green_dot_fopo/pngs/' +
str(i) + str('.png') for i in range(7)]
D_pic = [plt.imread(i) for i in keys]
"----------------Construct the integrator----------------"
non_integrand = Integrand(int_fwm.gama, sim_wind.tsh,
sim_wind.w_tiled, ss,ram, cython_tick=True,
timer=False)
"--------------------------------------------------------"
"----------------------Formulate WDMS--------------------"
if WDMS_pars == 'signal_locked':
Omega = 2 * pi * c / (lamp * 1e-9) - 2 * pi * c / (lams * 1e-9)
omegai = 2 * pi * c / (lamp * 1e-9) + Omega
lami = 1e9 * 2 * pi * c / (omegai)
WDMS_pars = ([lamp, lams], # WDM up downs in wavelengths [m]
[lami, lams],
[lami, lamp],
[lami, lams])
WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c,fopa)
for i in WDMS_pars] # WDM up downs in wavelengths [m]
# Phase modulators contructors
pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
"--------------------------------------------------------"
# for ei,i in enumerate(WDM_vec):
# i.plot(filename = str(ei))
"----------------------Formulate splicers--------------------"
splicers_vec = [Splicer(loss=i) for i in spl_losses]
"------------------------------------------------------------"
f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], sim_wind.fv[where[1][0], where[1][1]]
ex = Plotter_saver(plots, filesaves, sim_wind.fv,
sim_wind.t) # construct exporter
ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P_p, P_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, non_integrand, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1,fopa)
return None
def main():
"-----------------------------Stable parameters----------------------------"
# Number of computing cores for sweep
num_cores = arguments_determine(1)
# maximum tolerable error per step in integration
maxerr = 1e-13
ss = 1 # includes self steepening term
Df_band_vec = [5, 5, 10, 20]
fr = 0.18
plots = False # Do you want plots, (slow!)
filesaves = True # Do you want data dump?
complete = False
nplot = 1 # number of plots within fibre min is 2
if arguments_determine(-1) == 0:
fopa = True # If no oscillations then the WDMs are deleted to
# make the system in to a FOPA
else:
fopa = False
if 'mpi' in sys.argv:
method = 'mpi'
elif 'joblib' in sys.argv:
method = 'joblib'
else:
method = 'single'
"--------------------------------------------------------------------------"
stable_dic = {'num_cores': num_cores, 'maxerr': maxerr, 'ss': ss, 'plots': plots,
'nplot': nplot, 'filesaves': filesaves,
'fr':fr, 'fopa':fopa}
"------------------------Can be variable parameters------------------------"
n2 = 2.5e-20 # Nonlinear index [m/W]
gama = 10e-3 # Overwirtes n2 and Aeff w/m
alphadB = 0 # 0.0011667#666666666668 # loss within fibre[dB/m]
z = 18 # Length of the fibre
wave_idx = 0
power_area_idx = 0
N = np.array([i for i in range(2,13)]) # 2**N grid points
# Power list. [wavelength, power_area]
P_p_vec = [[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 4.4, 0.1), my_arange(4.5, 5, 0.05),
my_arange(5.1, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ]]
Df_band = Df_band_vec[power_area_idx]
P_p = P_p_vec[wave_idx][power_area_idx]
P_p = [6]#[4.9,4.95,5]
P_s = 0#100e-3
TFWHM_p = 0 # full with half max of pump
TFWHM_s = 0 # full with half max of signal
# loss of each type of splices [dB]
spl_losses = [0, 0, 1.4]
betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]
-1.002e-4, 3.671e-7]) * 1e-3
lamda_c = 1051.85e-9
# Zero dispersion wavelength [nm]
# max at ls,li = 1095, 1010
WDMS_pars = ([1048., 1204.16],
[927.7, 1204.16]) # WDM up downs in wavelengths [m]
lamp_vec = [1046,1047, 1048, 1049, 1050]
lamp = [lamp_vec[wave_idx]]
lams = ['lock' for i in range(len(lamp))]
lamp = lamp_vec[wave_idx]
lams = 'lock'
var_dic = {'n2': n2, 'gama': gama, 'alphadB': alphadB, 'z': z, 'P_p': P_p,
'P_s': P_s, 'TFWHM_p': TFWHM_p, 'TFWHM_s': TFWHM_s,
'spl_losses': spl_losses, 'betas': betas,
'lamda_c': lamda_c, 'WDMS_pars': WDMS_pars,
'lamp': lamp, 'lams': lams, 'N':N, 'Df_band': Df_band}
"--------------------------------------------------------------------------"
outside_var_key = 'P_p'
inside_var_key = 'N'
inside_var = var_dic[inside_var_key]
outside_var = var_dic[outside_var_key]
del var_dic[outside_var_key]
del var_dic[inside_var_key]
"----------------------------Simulation------------------------------------"
D_ins = [{'index': i, inside_var_key: insvar}
for i, insvar in enumerate(inside_var)]
large_dic = {**stable_dic, **var_dic}
if len(inside_var) < num_cores:
num_cores = len(inside_var)
profiler_bool = arguments_determine(0)
for kk, variable in enumerate(outside_var):
|
print('\a')
return None
class Band_predict(object):
def __init__(self, Df_band, nt):
self.bands = []
self.df = Df_band / nt
self.ro = []
def calculate(self, A, Df_band, over_band):
self.bands.append(Df_band)
self.ro.append(A)
if len(bands) == 1:
return Df_band + 1
a = (self.bands[-1] - self.bands[-2]) / (self.ro[-1] - self.ro[-2])
b = self.bands[-1] - a * self.ro[-1]
for i in over_band:
try:
Df_band[i] = a * arguments_determine(-1) + b
except TypeError:
Df_band[i] = None
return Df_band
if __name__ == '__main__':
start = time()
main()
dt = time() - start
print(dt, 'sec', dt / 60, 'min', dt / 60 / 60, 'hours')
| create_file_structure(kk)
_temps = create_destroy(inside_var, str(kk))
_temps.prepare_folder()
large_dic['lams'] = lams[kk]
large_dic['master_index'] = kk
large_dic[outside_var_key] = variable
if profiler_bool:
for i in range(len(D_ins)):
formulate(**{**D_ins[i], ** large_dic})
elif method == 'mpi':
iterables = ({**D_ins[i], ** large_dic} for i in range(len(D_ins)))
with MPIPoolExecutor() as executor:
A = executor.map(formulate, iterables)
else:
A = Parallel(n_jobs=num_cores)(delayed(formulate)(**{**D_ins[i], ** large_dic}) for i in range(len(D_ins)))
_temps.cleanup_folder() | conditional_block |
oscillator.py | import numpy as np
import sys
import os
import sys
sys.path.append('src')
from scipy.constants import c, pi
from joblib import Parallel, delayed
from mpi4py.futures import MPIPoolExecutor
from mpi4py import MPI
from scipy.fftpack import fftshift, fft
import os
import time as timeit
os.system('export FONTCONFIG_PATH=/etc/fonts')
from functions import *
from time import time, sleep
import pickle
@profile
def oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P0_p1, P0_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, dAdzmm, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1, fopa):
mode_names = ['LP01a']
u = np.zeros(sim_wind.t.shape, dtype='complex128')
U = np.zeros(sim_wind.fv.shape, dtype='complex128') #
T0_p = TFWHM_p / 2 / (np.log(2))**0.5
T0_s = TFWHM_s / 2 / (np.log(2))**0.5
noise_new = noise_obj.noise_func(int_fwm)
u = noise_new
woff1 = (p_pos[1] + (int_fwm.nt) // 2) * 2 * pi * sim_wind.df[p_pos[0]]
u[p_pos[0], :] += (P0_p1)**0.5 * np.exp(1j *
(woff1) * sim_wind.t[p_pos[0]])
woff2 = -(s_pos[1] - (int_fwm.nt - 1) // 2) * \
2 * pi * sim_wind.df[s_pos[0]]
u[s_pos[0], :] += (P0_s)**0.5 * np.exp(-1j *
(woff2) * sim_wind.t[s_pos[0]])
U = fftshift(fft(u), axes=-1)
master_index = str(master_index)
max_rounds = arguments_determine(-1)
if fopa:
print('Fibre amplifier!')
max_rounds = 0
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, max_rounds, mode_names, master_index, '00', 'original pump', D_pic[0], plots) |
U_original_pump = np.copy(U)
# Pass the original pump through the WDM1, port1 is in to the loop, port2
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
u, U = WDM_vec[0].pass_through((U, noise_new))[0]
ro = -1
t_total = 0
factors_xpm, factors_fwm,gama,tsh, w_tiled = \
dAdzmm.factors_xpm, dAdzmm.factors_fwm, dAdzmm.gama, dAdzmm.tsh, dAdzmm.w_tiled
dz,dzstep,maxerr = int_fwm.dz,int_fwm.dzstep,int_fwm.maxerr
Dop = np.ascontiguousarray(Dop/2)
factors_xpm = np.ascontiguousarray(factors_xpm)
factors_fwm = np.ascontiguousarray(factors_fwm)
gama = np.ascontiguousarray(gama)
tsh = np.ascontiguousarray(tsh)
w_tiled = np.ascontiguousarray(w_tiled)
while ro < max_rounds:
ro += 1
print('round', ro)
pulse_pos_dict = [
'round ' + str(ro) + ', ' + i for i in pulse_pos_dict_or]
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '1', pulse_pos_dict[3], D_pic[5], plots)
# Phase modulate before the Fibre
U = pm_fopa.modulate(U)
u = ifft(ifftshift(U, axes=-1))
#Pulse propagation
U, dz = pulse_propagation(u,dz,dzstep,maxerr, Dop,factors_xpm, factors_fwm, gama,tsh,w_tiled)
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '2', pulse_pos_dict[0], D_pic[2], plots)
max_noise = 10*noise_new.max()
#checks if the fft's are causing boundary condtion problems
if (U[:, 0] > max_noise).any() or (U[:, -1] > max_noise).any():
with open("error_log", "a") as myfile:
myfile.write("Pump: %5f, Seed: %5f, lamp: %5f, lams: %5f \n" % (
P0_p1, P0_s, 1e-3*c/f_p, 1e-3*c/f_s))
break
# pass through WDM2 port 2 continues and port 1 is out of the loop
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(out1, out2), (u, U) = WDM_vec[1].pass_through(
(U, noise_new))
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '3', pulse_pos_dict[3], D_pic[3], plots)
# Splice7 after WDM2 for the signal
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(u, U) = splicers_vec[2].pass_through(
(U, noise_new))[0]
#Phase modulate the oscillating signal so that to be in phase with the one coming in
U = pm_WDM1.modulate(U_original_pump, U)
# Pass again through WDM1 with the signal now
(u, U) = WDM_vec[0].pass_through(
(U_original_pump, U))[0]
################################The outbound stuff#####################
ex.exporter(index, int_fwm, sim_wind, out1, out2, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '4', pulse_pos_dict[4], D_pic[6], plots)
consolidate(ro, int_fwm,master_index, index)
return ro
def calc_P_out(U, U_original_pump, fv, t):
U = np.abs(U)**2
U_original_pump = np.abs(U_original_pump)**2
freq_band = 2
fp_id = np.where(U_original_pump == np.max(U_original_pump))[0][0]
plom = fp_id + 10
fv_id = np.where(U[plom:] == np.max(U[plom:]))[0][0]
fv_id += plom - 1
start, end = fv[fv_id] - freq_band, fv[fv_id] + freq_band
i = np.where(
np.abs(fv - start) == np.min(np.abs(fv - start)))[0][0]
j = np.where(
np.abs(fv - end) == np.min(np.abs(fv - end)))[0][0]
E_out = simps(U[i:j] * (t[1] - t[0])**2, fv[i:j])
P_out = E_out / (2 * np.abs(np.min(t)))
return P_out
@unpack_args
def formulate(index, n2, gama, alphadB, z, P_p, P_s, TFWHM_p, TFWHM_s, spl_losses, betas,
lamda_c, WDMS_pars, lamp, lams, num_cores, maxerr, ss, plots,
N, nplot, master_index, filesaves, Df_band, fr, fopa):
"------------------propagation paramaters------------------"
dzstep = z / nplot # distance per step
dz_less = 1e2
int_fwm = sim_parameters(n2, 1, alphadB)
int_fwm.general_options(maxerr, ss)
int_fwm.propagation_parameters(N, z, nplot, dz_less)
lamda = lamp * 1e-9 # central wavelength of the grid[m]
"-----------------------------f-----------------------------"
"---------------------Aeff-Qmatrixes-----------------------"
M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda_c, gama)
"----------------------------------------------------------"
"---------------------Grid&window-----------------------"
P_p_bef,P_s_bef = pre_fibre_init_power(WDMS_pars[0][0], WDMS_pars[0][1], lamp, P_p, P_s)
fv, where, f_centrals = fv_creator(
lamp, lams, lamda_c, int_fwm, betas, M, P_p_bef,P_s_bef, Df_band)
print(fv[0][1] - fv[0][0])
#print(1e-3 * c / np.array(f_centrals))
p_pos, s_pos, i_pos = where
sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
"----------------------------------------------------------"
"---------------------Loss-in-fibres-----------------------"
slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
loss = Loss(int_fwm, sim_wind, amax=0)
int_fwm.alpha = loss.atten_func_full(fv)
int_fwm.gama = np.array(
[-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
#if ss == 0:
# int_fwm.gama[:] = -1j * n2 * 2 * M * pi * (1e12 * f_centrals[3]) / (c)
int_fwm.gama[0:2] = 0
int_fwm.gama[5:] = 0
#for i in range(len(int_fwm.gama)):
# print(i, int_fwm.gama[i])
#exit()
"----------------------------------------------------------"
"--------------------Dispersion----------------------------"
Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
"----------------------------------------------------------"
"---------------------Raman Factors------------------------"
ram = Raman_factors(fr)
ram.set_raman_band(sim_wind)
"----------------------------------------------------------"
"--------------------Noise---------------------------------"
noise_obj = Noise(int_fwm, sim_wind)
"----------------------------------------------------------"
pulse_pos_dict_or = ('after propagation', "pass WDM2",
"pass WDM1 on port2 (remove pump)",
'add more pump', 'out')
keys = ['loading_data/green_dot_fopo/pngs/' +
str(i) + str('.png') for i in range(7)]
D_pic = [plt.imread(i) for i in keys]
"----------------Construct the integrator----------------"
non_integrand = Integrand(int_fwm.gama, sim_wind.tsh,
sim_wind.w_tiled, ss,ram, cython_tick=True,
timer=False)
"--------------------------------------------------------"
"----------------------Formulate WDMS--------------------"
if WDMS_pars == 'signal_locked':
Omega = 2 * pi * c / (lamp * 1e-9) - 2 * pi * c / (lams * 1e-9)
omegai = 2 * pi * c / (lamp * 1e-9) + Omega
lami = 1e9 * 2 * pi * c / (omegai)
WDMS_pars = ([lamp, lams], # WDM up downs in wavelengths [m]
[lami, lams],
[lami, lamp],
[lami, lams])
WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c,fopa)
for i in WDMS_pars] # WDM up downs in wavelengths [m]
# Phase modulators contructors
pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
"--------------------------------------------------------"
# for ei,i in enumerate(WDM_vec):
# i.plot(filename = str(ei))
"----------------------Formulate splicers--------------------"
splicers_vec = [Splicer(loss=i) for i in spl_losses]
"------------------------------------------------------------"
f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], sim_wind.fv[where[1][0], where[1][1]]
ex = Plotter_saver(plots, filesaves, sim_wind.fv,
sim_wind.t) # construct exporter
ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P_p, P_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, non_integrand, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1,fopa)
return None
def main():
"-----------------------------Stable parameters----------------------------"
# Number of computing cores for sweep
num_cores = arguments_determine(1)
# maximum tolerable error per step in integration
maxerr = 1e-13
ss = 1 # includes self steepening term
Df_band_vec = [5, 5, 10, 20]
fr = 0.18
plots = False # Do you want plots, (slow!)
filesaves = True # Do you want data dump?
complete = False
nplot = 1 # number of plots within fibre min is 2
if arguments_determine(-1) == 0:
fopa = True # If no oscillations then the WDMs are deleted to
# make the system in to a FOPA
else:
fopa = False
if 'mpi' in sys.argv:
method = 'mpi'
elif 'joblib' in sys.argv:
method = 'joblib'
else:
method = 'single'
"--------------------------------------------------------------------------"
stable_dic = {'num_cores': num_cores, 'maxerr': maxerr, 'ss': ss, 'plots': plots,
'nplot': nplot, 'filesaves': filesaves,
'fr':fr, 'fopa':fopa}
"------------------------Can be variable parameters------------------------"
n2 = 2.5e-20 # Nonlinear index [m/W]
gama = 10e-3 # Overwirtes n2 and Aeff w/m
alphadB = 0 # 0.0011667#666666666668 # loss within fibre[dB/m]
z = 18 # Length of the fibre
wave_idx = 0
power_area_idx = 0
N = np.array([i for i in range(2,13)]) # 2**N grid points
# Power list. [wavelength, power_area]
P_p_vec = [[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 4.4, 0.1), my_arange(4.5, 5, 0.05),
my_arange(5.1, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ]]
Df_band = Df_band_vec[power_area_idx]
P_p = P_p_vec[wave_idx][power_area_idx]
P_p = [6]#[4.9,4.95,5]
P_s = 0#100e-3
TFWHM_p = 0 # full with half max of pump
TFWHM_s = 0 # full with half max of signal
# loss of each type of splices [dB]
spl_losses = [0, 0, 1.4]
betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]
-1.002e-4, 3.671e-7]) * 1e-3
lamda_c = 1051.85e-9
# Zero dispersion wavelength [nm]
# max at ls,li = 1095, 1010
WDMS_pars = ([1048., 1204.16],
[927.7, 1204.16]) # WDM up downs in wavelengths [m]
lamp_vec = [1046,1047, 1048, 1049, 1050]
lamp = [lamp_vec[wave_idx]]
lams = ['lock' for i in range(len(lamp))]
lamp = lamp_vec[wave_idx]
lams = 'lock'
var_dic = {'n2': n2, 'gama': gama, 'alphadB': alphadB, 'z': z, 'P_p': P_p,
'P_s': P_s, 'TFWHM_p': TFWHM_p, 'TFWHM_s': TFWHM_s,
'spl_losses': spl_losses, 'betas': betas,
'lamda_c': lamda_c, 'WDMS_pars': WDMS_pars,
'lamp': lamp, 'lams': lams, 'N':N, 'Df_band': Df_band}
"--------------------------------------------------------------------------"
outside_var_key = 'P_p'
inside_var_key = 'N'
inside_var = var_dic[inside_var_key]
outside_var = var_dic[outside_var_key]
del var_dic[outside_var_key]
del var_dic[inside_var_key]
"----------------------------Simulation------------------------------------"
D_ins = [{'index': i, inside_var_key: insvar}
for i, insvar in enumerate(inside_var)]
large_dic = {**stable_dic, **var_dic}
if len(inside_var) < num_cores:
num_cores = len(inside_var)
profiler_bool = arguments_determine(0)
for kk, variable in enumerate(outside_var):
create_file_structure(kk)
_temps = create_destroy(inside_var, str(kk))
_temps.prepare_folder()
large_dic['lams'] = lams[kk]
large_dic['master_index'] = kk
large_dic[outside_var_key] = variable
if profiler_bool:
for i in range(len(D_ins)):
formulate(**{**D_ins[i], ** large_dic})
elif method == 'mpi':
iterables = ({**D_ins[i], ** large_dic} for i in range(len(D_ins)))
with MPIPoolExecutor() as executor:
A = executor.map(formulate, iterables)
else:
A = Parallel(n_jobs=num_cores)(delayed(formulate)(**{**D_ins[i], ** large_dic}) for i in range(len(D_ins)))
_temps.cleanup_folder()
print('\a')
return None
class Band_predict(object):
def __init__(self, Df_band, nt):
self.bands = []
self.df = Df_band / nt
self.ro = []
def calculate(self, A, Df_band, over_band):
self.bands.append(Df_band)
self.ro.append(A)
if len(bands) == 1:
return Df_band + 1
a = (self.bands[-1] - self.bands[-2]) / (self.ro[-1] - self.ro[-2])
b = self.bands[-1] - a * self.ro[-1]
for i in over_band:
try:
Df_band[i] = a * arguments_determine(-1) + b
except TypeError:
Df_band[i] = None
return Df_band
if __name__ == '__main__':
start = time()
main()
dt = time() - start
print(dt, 'sec', dt / 60, 'min', dt / 60 / 60, 'hours') | random_line_split | |
oscillator.py | import numpy as np
import sys
import os
import sys
sys.path.append('src')
from scipy.constants import c, pi
from joblib import Parallel, delayed
from mpi4py.futures import MPIPoolExecutor
from mpi4py import MPI
from scipy.fftpack import fftshift, fft
import os
import time as timeit
os.system('export FONTCONFIG_PATH=/etc/fonts')
from functions import *
from time import time, sleep
import pickle
@profile
def oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P0_p1, P0_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, dAdzmm, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1, fopa):
mode_names = ['LP01a']
u = np.zeros(sim_wind.t.shape, dtype='complex128')
U = np.zeros(sim_wind.fv.shape, dtype='complex128') #
T0_p = TFWHM_p / 2 / (np.log(2))**0.5
T0_s = TFWHM_s / 2 / (np.log(2))**0.5
noise_new = noise_obj.noise_func(int_fwm)
u = noise_new
woff1 = (p_pos[1] + (int_fwm.nt) // 2) * 2 * pi * sim_wind.df[p_pos[0]]
u[p_pos[0], :] += (P0_p1)**0.5 * np.exp(1j *
(woff1) * sim_wind.t[p_pos[0]])
woff2 = -(s_pos[1] - (int_fwm.nt - 1) // 2) * \
2 * pi * sim_wind.df[s_pos[0]]
u[s_pos[0], :] += (P0_s)**0.5 * np.exp(-1j *
(woff2) * sim_wind.t[s_pos[0]])
U = fftshift(fft(u), axes=-1)
master_index = str(master_index)
max_rounds = arguments_determine(-1)
if fopa:
print('Fibre amplifier!')
max_rounds = 0
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, max_rounds, mode_names, master_index, '00', 'original pump', D_pic[0], plots)
U_original_pump = np.copy(U)
# Pass the original pump through the WDM1, port1 is in to the loop, port2
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
u, U = WDM_vec[0].pass_through((U, noise_new))[0]
ro = -1
t_total = 0
factors_xpm, factors_fwm,gama,tsh, w_tiled = \
dAdzmm.factors_xpm, dAdzmm.factors_fwm, dAdzmm.gama, dAdzmm.tsh, dAdzmm.w_tiled
dz,dzstep,maxerr = int_fwm.dz,int_fwm.dzstep,int_fwm.maxerr
Dop = np.ascontiguousarray(Dop/2)
factors_xpm = np.ascontiguousarray(factors_xpm)
factors_fwm = np.ascontiguousarray(factors_fwm)
gama = np.ascontiguousarray(gama)
tsh = np.ascontiguousarray(tsh)
w_tiled = np.ascontiguousarray(w_tiled)
while ro < max_rounds:
ro += 1
print('round', ro)
pulse_pos_dict = [
'round ' + str(ro) + ', ' + i for i in pulse_pos_dict_or]
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '1', pulse_pos_dict[3], D_pic[5], plots)
# Phase modulate before the Fibre
U = pm_fopa.modulate(U)
u = ifft(ifftshift(U, axes=-1))
#Pulse propagation
U, dz = pulse_propagation(u,dz,dzstep,maxerr, Dop,factors_xpm, factors_fwm, gama,tsh,w_tiled)
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '2', pulse_pos_dict[0], D_pic[2], plots)
max_noise = 10*noise_new.max()
#checks if the fft's are causing boundary condtion problems
if (U[:, 0] > max_noise).any() or (U[:, -1] > max_noise).any():
with open("error_log", "a") as myfile:
myfile.write("Pump: %5f, Seed: %5f, lamp: %5f, lams: %5f \n" % (
P0_p1, P0_s, 1e-3*c/f_p, 1e-3*c/f_s))
break
# pass through WDM2 port 2 continues and port 1 is out of the loop
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(out1, out2), (u, U) = WDM_vec[1].pass_through(
(U, noise_new))
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '3', pulse_pos_dict[3], D_pic[3], plots)
# Splice7 after WDM2 for the signal
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(u, U) = splicers_vec[2].pass_through(
(U, noise_new))[0]
#Phase modulate the oscillating signal so that to be in phase with the one coming in
U = pm_WDM1.modulate(U_original_pump, U)
# Pass again through WDM1 with the signal now
(u, U) = WDM_vec[0].pass_through(
(U_original_pump, U))[0]
################################The outbound stuff#####################
ex.exporter(index, int_fwm, sim_wind, out1, out2, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '4', pulse_pos_dict[4], D_pic[6], plots)
consolidate(ro, int_fwm,master_index, index)
return ro
def calc_P_out(U, U_original_pump, fv, t):
U = np.abs(U)**2
U_original_pump = np.abs(U_original_pump)**2
freq_band = 2
fp_id = np.where(U_original_pump == np.max(U_original_pump))[0][0]
plom = fp_id + 10
fv_id = np.where(U[plom:] == np.max(U[plom:]))[0][0]
fv_id += plom - 1
start, end = fv[fv_id] - freq_band, fv[fv_id] + freq_band
i = np.where(
np.abs(fv - start) == np.min(np.abs(fv - start)))[0][0]
j = np.where(
np.abs(fv - end) == np.min(np.abs(fv - end)))[0][0]
E_out = simps(U[i:j] * (t[1] - t[0])**2, fv[i:j])
P_out = E_out / (2 * np.abs(np.min(t)))
return P_out
@unpack_args
def formulate(index, n2, gama, alphadB, z, P_p, P_s, TFWHM_p, TFWHM_s, spl_losses, betas,
lamda_c, WDMS_pars, lamp, lams, num_cores, maxerr, ss, plots,
N, nplot, master_index, filesaves, Df_band, fr, fopa):
"------------------propagation paramaters------------------"
dzstep = z / nplot # distance per step
dz_less = 1e2
int_fwm = sim_parameters(n2, 1, alphadB)
int_fwm.general_options(maxerr, ss)
int_fwm.propagation_parameters(N, z, nplot, dz_less)
lamda = lamp * 1e-9 # central wavelength of the grid[m]
"-----------------------------f-----------------------------"
"---------------------Aeff-Qmatrixes-----------------------"
M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda_c, gama)
"----------------------------------------------------------"
"---------------------Grid&window-----------------------"
P_p_bef,P_s_bef = pre_fibre_init_power(WDMS_pars[0][0], WDMS_pars[0][1], lamp, P_p, P_s)
fv, where, f_centrals = fv_creator(
lamp, lams, lamda_c, int_fwm, betas, M, P_p_bef,P_s_bef, Df_band)
print(fv[0][1] - fv[0][0])
#print(1e-3 * c / np.array(f_centrals))
p_pos, s_pos, i_pos = where
sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
"----------------------------------------------------------"
"---------------------Loss-in-fibres-----------------------"
slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
loss = Loss(int_fwm, sim_wind, amax=0)
int_fwm.alpha = loss.atten_func_full(fv)
int_fwm.gama = np.array(
[-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
#if ss == 0:
# int_fwm.gama[:] = -1j * n2 * 2 * M * pi * (1e12 * f_centrals[3]) / (c)
int_fwm.gama[0:2] = 0
int_fwm.gama[5:] = 0
#for i in range(len(int_fwm.gama)):
# print(i, int_fwm.gama[i])
#exit()
"----------------------------------------------------------"
"--------------------Dispersion----------------------------"
Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
"----------------------------------------------------------"
"---------------------Raman Factors------------------------"
ram = Raman_factors(fr)
ram.set_raman_band(sim_wind)
"----------------------------------------------------------"
"--------------------Noise---------------------------------"
noise_obj = Noise(int_fwm, sim_wind)
"----------------------------------------------------------"
pulse_pos_dict_or = ('after propagation', "pass WDM2",
"pass WDM1 on port2 (remove pump)",
'add more pump', 'out')
keys = ['loading_data/green_dot_fopo/pngs/' +
str(i) + str('.png') for i in range(7)]
D_pic = [plt.imread(i) for i in keys]
"----------------Construct the integrator----------------"
non_integrand = Integrand(int_fwm.gama, sim_wind.tsh,
sim_wind.w_tiled, ss,ram, cython_tick=True,
timer=False)
"--------------------------------------------------------"
"----------------------Formulate WDMS--------------------"
if WDMS_pars == 'signal_locked':
Omega = 2 * pi * c / (lamp * 1e-9) - 2 * pi * c / (lams * 1e-9)
omegai = 2 * pi * c / (lamp * 1e-9) + Omega
lami = 1e9 * 2 * pi * c / (omegai)
WDMS_pars = ([lamp, lams], # WDM up downs in wavelengths [m]
[lami, lams],
[lami, lamp],
[lami, lams])
WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c,fopa)
for i in WDMS_pars] # WDM up downs in wavelengths [m]
# Phase modulators contructors
pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
"--------------------------------------------------------"
# for ei,i in enumerate(WDM_vec):
# i.plot(filename = str(ei))
"----------------------Formulate splicers--------------------"
splicers_vec = [Splicer(loss=i) for i in spl_losses]
"------------------------------------------------------------"
f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], sim_wind.fv[where[1][0], where[1][1]]
ex = Plotter_saver(plots, filesaves, sim_wind.fv,
sim_wind.t) # construct exporter
ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P_p, P_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, non_integrand, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1,fopa)
return None
def main():
|
class Band_predict(object):
def __init__(self, Df_band, nt):
self.bands = []
self.df = Df_band / nt
self.ro = []
def calculate(self, A, Df_band, over_band):
self.bands.append(Df_band)
self.ro.append(A)
if len(bands) == 1:
return Df_band + 1
a = (self.bands[-1] - self.bands[-2]) / (self.ro[-1] - self.ro[-2])
b = self.bands[-1] - a * self.ro[-1]
for i in over_band:
try:
Df_band[i] = a * arguments_determine(-1) + b
except TypeError:
Df_band[i] = None
return Df_band
if __name__ == '__main__':
start = time()
main()
dt = time() - start
print(dt, 'sec', dt / 60, 'min', dt / 60 / 60, 'hours')
| "-----------------------------Stable parameters----------------------------"
# Number of computing cores for sweep
num_cores = arguments_determine(1)
# maximum tolerable error per step in integration
maxerr = 1e-13
ss = 1 # includes self steepening term
Df_band_vec = [5, 5, 10, 20]
fr = 0.18
plots = False # Do you want plots, (slow!)
filesaves = True # Do you want data dump?
complete = False
nplot = 1 # number of plots within fibre min is 2
if arguments_determine(-1) == 0:
fopa = True # If no oscillations then the WDMs are deleted to
# make the system in to a FOPA
else:
fopa = False
if 'mpi' in sys.argv:
method = 'mpi'
elif 'joblib' in sys.argv:
method = 'joblib'
else:
method = 'single'
"--------------------------------------------------------------------------"
stable_dic = {'num_cores': num_cores, 'maxerr': maxerr, 'ss': ss, 'plots': plots,
'nplot': nplot, 'filesaves': filesaves,
'fr':fr, 'fopa':fopa}
"------------------------Can be variable parameters------------------------"
n2 = 2.5e-20 # Nonlinear index [m/W]
gama = 10e-3 # Overwirtes n2 and Aeff w/m
alphadB = 0 # 0.0011667#666666666668 # loss within fibre[dB/m]
z = 18 # Length of the fibre
wave_idx = 0
power_area_idx = 0
N = np.array([i for i in range(2,13)]) # 2**N grid points
# Power list. [wavelength, power_area]
P_p_vec = [[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 4.4, 0.1), my_arange(4.5, 5, 0.05),
my_arange(5.1, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ]]
Df_band = Df_band_vec[power_area_idx]
P_p = P_p_vec[wave_idx][power_area_idx]
P_p = [6]#[4.9,4.95,5]
P_s = 0#100e-3
TFWHM_p = 0 # full with half max of pump
TFWHM_s = 0 # full with half max of signal
# loss of each type of splices [dB]
spl_losses = [0, 0, 1.4]
betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]
-1.002e-4, 3.671e-7]) * 1e-3
lamda_c = 1051.85e-9
# Zero dispersion wavelength [nm]
# max at ls,li = 1095, 1010
WDMS_pars = ([1048., 1204.16],
[927.7, 1204.16]) # WDM up downs in wavelengths [m]
lamp_vec = [1046,1047, 1048, 1049, 1050]
lamp = [lamp_vec[wave_idx]]
lams = ['lock' for i in range(len(lamp))]
lamp = lamp_vec[wave_idx]
lams = 'lock'
var_dic = {'n2': n2, 'gama': gama, 'alphadB': alphadB, 'z': z, 'P_p': P_p,
'P_s': P_s, 'TFWHM_p': TFWHM_p, 'TFWHM_s': TFWHM_s,
'spl_losses': spl_losses, 'betas': betas,
'lamda_c': lamda_c, 'WDMS_pars': WDMS_pars,
'lamp': lamp, 'lams': lams, 'N':N, 'Df_band': Df_band}
"--------------------------------------------------------------------------"
outside_var_key = 'P_p'
inside_var_key = 'N'
inside_var = var_dic[inside_var_key]
outside_var = var_dic[outside_var_key]
del var_dic[outside_var_key]
del var_dic[inside_var_key]
"----------------------------Simulation------------------------------------"
D_ins = [{'index': i, inside_var_key: insvar}
for i, insvar in enumerate(inside_var)]
large_dic = {**stable_dic, **var_dic}
if len(inside_var) < num_cores:
num_cores = len(inside_var)
profiler_bool = arguments_determine(0)
for kk, variable in enumerate(outside_var):
create_file_structure(kk)
_temps = create_destroy(inside_var, str(kk))
_temps.prepare_folder()
large_dic['lams'] = lams[kk]
large_dic['master_index'] = kk
large_dic[outside_var_key] = variable
if profiler_bool:
for i in range(len(D_ins)):
formulate(**{**D_ins[i], ** large_dic})
elif method == 'mpi':
iterables = ({**D_ins[i], ** large_dic} for i in range(len(D_ins)))
with MPIPoolExecutor() as executor:
A = executor.map(formulate, iterables)
else:
A = Parallel(n_jobs=num_cores)(delayed(formulate)(**{**D_ins[i], ** large_dic}) for i in range(len(D_ins)))
_temps.cleanup_folder()
print('\a')
return None | identifier_body |
oscillator.py | import numpy as np
import sys
import os
import sys
sys.path.append('src')
from scipy.constants import c, pi
from joblib import Parallel, delayed
from mpi4py.futures import MPIPoolExecutor
from mpi4py import MPI
from scipy.fftpack import fftshift, fft
import os
import time as timeit
os.system('export FONTCONFIG_PATH=/etc/fonts')
from functions import *
from time import time, sleep
import pickle
@profile
def oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P0_p1, P0_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, dAdzmm, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1, fopa):
mode_names = ['LP01a']
u = np.zeros(sim_wind.t.shape, dtype='complex128')
U = np.zeros(sim_wind.fv.shape, dtype='complex128') #
T0_p = TFWHM_p / 2 / (np.log(2))**0.5
T0_s = TFWHM_s / 2 / (np.log(2))**0.5
noise_new = noise_obj.noise_func(int_fwm)
u = noise_new
woff1 = (p_pos[1] + (int_fwm.nt) // 2) * 2 * pi * sim_wind.df[p_pos[0]]
u[p_pos[0], :] += (P0_p1)**0.5 * np.exp(1j *
(woff1) * sim_wind.t[p_pos[0]])
woff2 = -(s_pos[1] - (int_fwm.nt - 1) // 2) * \
2 * pi * sim_wind.df[s_pos[0]]
u[s_pos[0], :] += (P0_s)**0.5 * np.exp(-1j *
(woff2) * sim_wind.t[s_pos[0]])
U = fftshift(fft(u), axes=-1)
master_index = str(master_index)
max_rounds = arguments_determine(-1)
if fopa:
print('Fibre amplifier!')
max_rounds = 0
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, max_rounds, mode_names, master_index, '00', 'original pump', D_pic[0], plots)
U_original_pump = np.copy(U)
# Pass the original pump through the WDM1, port1 is in to the loop, port2
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
u, U = WDM_vec[0].pass_through((U, noise_new))[0]
ro = -1
t_total = 0
factors_xpm, factors_fwm,gama,tsh, w_tiled = \
dAdzmm.factors_xpm, dAdzmm.factors_fwm, dAdzmm.gama, dAdzmm.tsh, dAdzmm.w_tiled
dz,dzstep,maxerr = int_fwm.dz,int_fwm.dzstep,int_fwm.maxerr
Dop = np.ascontiguousarray(Dop/2)
factors_xpm = np.ascontiguousarray(factors_xpm)
factors_fwm = np.ascontiguousarray(factors_fwm)
gama = np.ascontiguousarray(gama)
tsh = np.ascontiguousarray(tsh)
w_tiled = np.ascontiguousarray(w_tiled)
while ro < max_rounds:
ro += 1
print('round', ro)
pulse_pos_dict = [
'round ' + str(ro) + ', ' + i for i in pulse_pos_dict_or]
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '1', pulse_pos_dict[3], D_pic[5], plots)
# Phase modulate before the Fibre
U = pm_fopa.modulate(U)
u = ifft(ifftshift(U, axes=-1))
#Pulse propagation
U, dz = pulse_propagation(u,dz,dzstep,maxerr, Dop,factors_xpm, factors_fwm, gama,tsh,w_tiled)
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '2', pulse_pos_dict[0], D_pic[2], plots)
max_noise = 10*noise_new.max()
#checks if the fft's are causing boundary condtion problems
if (U[:, 0] > max_noise).any() or (U[:, -1] > max_noise).any():
with open("error_log", "a") as myfile:
myfile.write("Pump: %5f, Seed: %5f, lamp: %5f, lams: %5f \n" % (
P0_p1, P0_s, 1e-3*c/f_p, 1e-3*c/f_s))
break
# pass through WDM2 port 2 continues and port 1 is out of the loop
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(out1, out2), (u, U) = WDM_vec[1].pass_through(
(U, noise_new))
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '3', pulse_pos_dict[3], D_pic[3], plots)
# Splice7 after WDM2 for the signal
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(u, U) = splicers_vec[2].pass_through(
(U, noise_new))[0]
#Phase modulate the oscillating signal so that to be in phase with the one coming in
U = pm_WDM1.modulate(U_original_pump, U)
# Pass again through WDM1 with the signal now
(u, U) = WDM_vec[0].pass_through(
(U_original_pump, U))[0]
################################The outbound stuff#####################
ex.exporter(index, int_fwm, sim_wind, out1, out2, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '4', pulse_pos_dict[4], D_pic[6], plots)
consolidate(ro, int_fwm,master_index, index)
return ro
def calc_P_out(U, U_original_pump, fv, t):
U = np.abs(U)**2
U_original_pump = np.abs(U_original_pump)**2
freq_band = 2
fp_id = np.where(U_original_pump == np.max(U_original_pump))[0][0]
plom = fp_id + 10
fv_id = np.where(U[plom:] == np.max(U[plom:]))[0][0]
fv_id += plom - 1
start, end = fv[fv_id] - freq_band, fv[fv_id] + freq_band
i = np.where(
np.abs(fv - start) == np.min(np.abs(fv - start)))[0][0]
j = np.where(
np.abs(fv - end) == np.min(np.abs(fv - end)))[0][0]
E_out = simps(U[i:j] * (t[1] - t[0])**2, fv[i:j])
P_out = E_out / (2 * np.abs(np.min(t)))
return P_out
@unpack_args
def formulate(index, n2, gama, alphadB, z, P_p, P_s, TFWHM_p, TFWHM_s, spl_losses, betas,
lamda_c, WDMS_pars, lamp, lams, num_cores, maxerr, ss, plots,
N, nplot, master_index, filesaves, Df_band, fr, fopa):
"------------------propagation paramaters------------------"
dzstep = z / nplot # distance per step
dz_less = 1e2
int_fwm = sim_parameters(n2, 1, alphadB)
int_fwm.general_options(maxerr, ss)
int_fwm.propagation_parameters(N, z, nplot, dz_less)
lamda = lamp * 1e-9 # central wavelength of the grid[m]
"-----------------------------f-----------------------------"
"---------------------Aeff-Qmatrixes-----------------------"
M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda_c, gama)
"----------------------------------------------------------"
"---------------------Grid&window-----------------------"
P_p_bef,P_s_bef = pre_fibre_init_power(WDMS_pars[0][0], WDMS_pars[0][1], lamp, P_p, P_s)
fv, where, f_centrals = fv_creator(
lamp, lams, lamda_c, int_fwm, betas, M, P_p_bef,P_s_bef, Df_band)
print(fv[0][1] - fv[0][0])
#print(1e-3 * c / np.array(f_centrals))
p_pos, s_pos, i_pos = where
sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
"----------------------------------------------------------"
"---------------------Loss-in-fibres-----------------------"
slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
loss = Loss(int_fwm, sim_wind, amax=0)
int_fwm.alpha = loss.atten_func_full(fv)
int_fwm.gama = np.array(
[-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
#if ss == 0:
# int_fwm.gama[:] = -1j * n2 * 2 * M * pi * (1e12 * f_centrals[3]) / (c)
int_fwm.gama[0:2] = 0
int_fwm.gama[5:] = 0
#for i in range(len(int_fwm.gama)):
# print(i, int_fwm.gama[i])
#exit()
"----------------------------------------------------------"
"--------------------Dispersion----------------------------"
Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
"----------------------------------------------------------"
"---------------------Raman Factors------------------------"
ram = Raman_factors(fr)
ram.set_raman_band(sim_wind)
"----------------------------------------------------------"
"--------------------Noise---------------------------------"
noise_obj = Noise(int_fwm, sim_wind)
"----------------------------------------------------------"
pulse_pos_dict_or = ('after propagation', "pass WDM2",
"pass WDM1 on port2 (remove pump)",
'add more pump', 'out')
keys = ['loading_data/green_dot_fopo/pngs/' +
str(i) + str('.png') for i in range(7)]
D_pic = [plt.imread(i) for i in keys]
"----------------Construct the integrator----------------"
non_integrand = Integrand(int_fwm.gama, sim_wind.tsh,
sim_wind.w_tiled, ss,ram, cython_tick=True,
timer=False)
"--------------------------------------------------------"
"----------------------Formulate WDMS--------------------"
if WDMS_pars == 'signal_locked':
Omega = 2 * pi * c / (lamp * 1e-9) - 2 * pi * c / (lams * 1e-9)
omegai = 2 * pi * c / (lamp * 1e-9) + Omega
lami = 1e9 * 2 * pi * c / (omegai)
WDMS_pars = ([lamp, lams], # WDM up downs in wavelengths [m]
[lami, lams],
[lami, lamp],
[lami, lams])
WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c,fopa)
for i in WDMS_pars] # WDM up downs in wavelengths [m]
# Phase modulators contructors
pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
"--------------------------------------------------------"
# for ei,i in enumerate(WDM_vec):
# i.plot(filename = str(ei))
"----------------------Formulate splicers--------------------"
splicers_vec = [Splicer(loss=i) for i in spl_losses]
"------------------------------------------------------------"
f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], sim_wind.fv[where[1][0], where[1][1]]
ex = Plotter_saver(plots, filesaves, sim_wind.fv,
sim_wind.t) # construct exporter
ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P_p, P_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, non_integrand, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1,fopa)
return None
def main():
"-----------------------------Stable parameters----------------------------"
# Number of computing cores for sweep
num_cores = arguments_determine(1)
# maximum tolerable error per step in integration
maxerr = 1e-13
ss = 1 # includes self steepening term
Df_band_vec = [5, 5, 10, 20]
fr = 0.18
plots = False # Do you want plots, (slow!)
filesaves = True # Do you want data dump?
complete = False
nplot = 1 # number of plots within fibre min is 2
if arguments_determine(-1) == 0:
fopa = True # If no oscillations then the WDMs are deleted to
# make the system in to a FOPA
else:
fopa = False
if 'mpi' in sys.argv:
method = 'mpi'
elif 'joblib' in sys.argv:
method = 'joblib'
else:
method = 'single'
"--------------------------------------------------------------------------"
stable_dic = {'num_cores': num_cores, 'maxerr': maxerr, 'ss': ss, 'plots': plots,
'nplot': nplot, 'filesaves': filesaves,
'fr':fr, 'fopa':fopa}
"------------------------Can be variable parameters------------------------"
n2 = 2.5e-20 # Nonlinear index [m/W]
gama = 10e-3 # Overwirtes n2 and Aeff w/m
alphadB = 0 # 0.0011667#666666666668 # loss within fibre[dB/m]
z = 18 # Length of the fibre
wave_idx = 0
power_area_idx = 0
N = np.array([i for i in range(2,13)]) # 2**N grid points
# Power list. [wavelength, power_area]
P_p_vec = [[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 4.4, 0.1), my_arange(4.5, 5, 0.05),
my_arange(5.1, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ]]
Df_band = Df_band_vec[power_area_idx]
P_p = P_p_vec[wave_idx][power_area_idx]
P_p = [6]#[4.9,4.95,5]
P_s = 0#100e-3
TFWHM_p = 0 # full with half max of pump
TFWHM_s = 0 # full with half max of signal
# loss of each type of splices [dB]
spl_losses = [0, 0, 1.4]
betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]
-1.002e-4, 3.671e-7]) * 1e-3
lamda_c = 1051.85e-9
# Zero dispersion wavelength [nm]
# max at ls,li = 1095, 1010
WDMS_pars = ([1048., 1204.16],
[927.7, 1204.16]) # WDM up downs in wavelengths [m]
lamp_vec = [1046,1047, 1048, 1049, 1050]
lamp = [lamp_vec[wave_idx]]
lams = ['lock' for i in range(len(lamp))]
lamp = lamp_vec[wave_idx]
lams = 'lock'
var_dic = {'n2': n2, 'gama': gama, 'alphadB': alphadB, 'z': z, 'P_p': P_p,
'P_s': P_s, 'TFWHM_p': TFWHM_p, 'TFWHM_s': TFWHM_s,
'spl_losses': spl_losses, 'betas': betas,
'lamda_c': lamda_c, 'WDMS_pars': WDMS_pars,
'lamp': lamp, 'lams': lams, 'N':N, 'Df_band': Df_band}
"--------------------------------------------------------------------------"
outside_var_key = 'P_p'
inside_var_key = 'N'
inside_var = var_dic[inside_var_key]
outside_var = var_dic[outside_var_key]
del var_dic[outside_var_key]
del var_dic[inside_var_key]
"----------------------------Simulation------------------------------------"
D_ins = [{'index': i, inside_var_key: insvar}
for i, insvar in enumerate(inside_var)]
large_dic = {**stable_dic, **var_dic}
if len(inside_var) < num_cores:
num_cores = len(inside_var)
profiler_bool = arguments_determine(0)
for kk, variable in enumerate(outside_var):
create_file_structure(kk)
_temps = create_destroy(inside_var, str(kk))
_temps.prepare_folder()
large_dic['lams'] = lams[kk]
large_dic['master_index'] = kk
large_dic[outside_var_key] = variable
if profiler_bool:
for i in range(len(D_ins)):
formulate(**{**D_ins[i], ** large_dic})
elif method == 'mpi':
iterables = ({**D_ins[i], ** large_dic} for i in range(len(D_ins)))
with MPIPoolExecutor() as executor:
A = executor.map(formulate, iterables)
else:
A = Parallel(n_jobs=num_cores)(delayed(formulate)(**{**D_ins[i], ** large_dic}) for i in range(len(D_ins)))
_temps.cleanup_folder()
print('\a')
return None
class Band_predict(object):
def | (self, Df_band, nt):
self.bands = []
self.df = Df_band / nt
self.ro = []
def calculate(self, A, Df_band, over_band):
self.bands.append(Df_band)
self.ro.append(A)
if len(bands) == 1:
return Df_band + 1
a = (self.bands[-1] - self.bands[-2]) / (self.ro[-1] - self.ro[-2])
b = self.bands[-1] - a * self.ro[-1]
for i in over_band:
try:
Df_band[i] = a * arguments_determine(-1) + b
except TypeError:
Df_band[i] = None
return Df_band
if __name__ == '__main__':
start = time()
main()
dt = time() - start
print(dt, 'sec', dt / 60, 'min', dt / 60 / 60, 'hours')
| __init__ | identifier_name |
server.go | // Copyright (C) 2018 by Alberto Bregliano <alberto.bregliano@pm.me>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//Package server provides the ability to forward Nagios Notifications
//to an Asterisk phone sytem.
package server
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"time"
"github.com/spf13/viper"
"github.com/axamon/sauron2/reperibili"
"github.com/gorilla/mux"
)
//Notifica sono le info che si ricevono dai nagios che vengono
//elaborate per creare le chiamate automatiche
type Notifica struct {
//Time time.Time `json:"timestamp,omitempty"`
Hostname string `json:"hostname,omitempty"`
Service string `json:"servizio,omitempty"`
Piattaforma string `json:"piattaforma,omitempty"`
Reperibile string `json:"reperibile,omitempty"`
Cellulare string `json:"cellulare,omitempty"`
Messaggio string `json:"messaggio,omitempty"`
}
//Dettagli non usato al momento ma servirà a gestire le
//risposte del centralino virtuale asterisk
type Dettagli struct {
Info string `json:"info,omitempty"`
State string `json:"state,omitempty"`
}
var people []Notifica
func respondWithError(w http.ResponseWriter, code int, message string) {
respondWithJSON(w, code, map[string]string{"error": message})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
//GetReper recupera il reperibile attuale per la piattaforma
//passata come argomento
func GetReper(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
vars := mux.Vars(r)
piattaforma := vars["piatta"]
reperibile, err := reperibili.GetReperibile(piattaforma)
if err != nil {
respondWithError(w, http.StatusNoContent, err.Error())
return
}
result := fmt.Sprintf("Il reperibile per %s è: %s. Cell: %s", piattaforma, reperibile.Cognome, reperibile.Cellulare)
respondWithJSON(w, http.StatusFound, result)
return
}
//SetReper inserisce reperibilità in un archivio condiviso
func SetReper(w http.ResponseWriter, r *http.Request) {
/* var p reperibili.Contatto
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close() */
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
r.ParseForm()
nome := r.PostFormValue("nome")
cognome := r.PostFormValue("cognome")
cellulare := r.PostFormValue("cellulare")
piattaforma := r.PostFormValue("piattaforma")
oggi := time.Now().Format("20060102")
err := reperibili.AddRuota(nome, cognome, cellulare, piattaforma, oggi, "gruppo6")
if err != nil {
fmt.Println("errorone", err.Error(), cellulare)
return
}
fmt.Println("inserito reperibile: ", nome, cognome, cellulare)
/* err := reperibili.AddRuota(p.Nome, p.Cognome, p.Cellulare, "CDN", "20180101", "gruppo6")
if err != nil {
fmt.Println("errorone")
}
*/
/* fmt.Println(p)
respondWithJSON(w, http.StatusCreated, p) */
return
}
//Callfile sends the file gerated for asterisk
func Callfile(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "/tmp/exampleTest.call")
return
}
//LogNotifica archivia la notifica in un Database
func LogNotifica(n Notifica) (err error) {
//recupera il timestamp di adesso
timestamp := time.Now()
//apre il database
database, _ := sql.Open("sqlite3", "./sarumann.db")
//chiudi Database una volta fatto
defer database.Close()
//prepara la creazione della tabella notifiche se non esite
statement, _ := database.Prepare("CREATE TABLE IF NOT EXISTS notifiche (id INTEGER PRIMARY KEY, server TEXT, servizio TEXT, piattaforma TEXT, reperibile TEXT, cellulare TEXT, messaggio TEXT, timestamp INT)")
//esegue la creazione della tabella notifiche se non esiste già nel database
statement.Exec()
//prepara l'inserimenti della notifica
statement, err = database.Prepare("INSERT INTO notifiche(server, servizio, piattaforma, reperibile, cellulare, messaggio, timestamp) VALUES(?,?,?,?,?,?,?)")
if err != nil {
fmt.Println(err.Error())
}
//esegue l'inserimento della notifica passata come argomento della funzione
_, err = statement.Exec(n.Hostname, n.Service, n.Piattaforma, n.Reperibile, n.Cellulare, n.Messaggio, timestamp.Unix())
if err != nil {
fmt.Println(err.Error())
}
return
}
//AntiStorm evita che il reperibile riceva troppe chiamate
func AntiStorm(piattaforma string) (err error) {
database, _ := sql.Open("sqlite3", "./sarumann.db")
defer database.Close()
row := database.QueryRow("SELECT timestamp FROM notifiche where piattaforma = ? order by timestamp desc limit 1", piattaforma)
var last string
row.Scan(&last)
fmt.Println(last) //debug
lastint, err := strconv.Atoi(last)
if err != nil {
fmt.Println("errore")
}
oraepoch := time.Now().Unix()
fmt.Println(oraepoch) //debug
//Se non sono passati tot secondi dall'ultima notifica allora esce
tot := 1800 ///1800 secondi uguale mezz'ora
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall riceve gli alerts dei nagios
func Creat | tp.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Invia cmq la chiamata se è per la piattaforma CDN
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//Se siamo in fuori orario base
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Verifica che sia passato abbastanza tempo dall'ultima chiamata prima di chiamare nuovamente
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Logga sul db la notifica in entrata
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica riceve gli alerts dei nagios e li utilizza per
//allertare telefonicamente il reperibile in turno
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
return
}
func isfob(ora time.Time, foborainizio int) (ok bool) {
//ora := time.Now()
giorno := ora.Weekday()
//Partiamo che non siamo in FOB
ok = false
switch giorno {
//Se è sabato siamo in fob
case time.Saturday:
//fmt.Println("E' sabato")
ok = true
//Se è domenica siamo in fob
case time.Sunday:
//fmt.Println("E' Domenica")
ok = true
//Se invece è un giorno feriale dobbiamo vedere l'orario
default:
//se è dopo le 18 siamo in fob
//Si avviso il reperibile mezz'ora prima se è un problema si può cambiare
//Recupero l'ora del FOB dal file di configurazione
if ora.Hour() >= foborainizio {
//fmt.Println("Giorno feriale", viper.GetInt("foborainizio"))
ok = true
return ok
}
//se è prima delle 7 allora siamo in fob
if ora.Hour() < 7 {
ok = true
}
}
//Ritorna ok che sarà true o false a seconda se siamo in FOB o no
return ok
}
//CreateCall crea il file .call che serve ad Asterisk per contattare il reperibile
func CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio string) (err error) {
//Trasforma il campo passato in una stringa di 10 numeri
cell, err := verificaCell(reperibile)
if err != nil {
log.Printf("Cellulare non gestibile: %s\n", err.Error())
return
}
scheletro :=
`Channel: SIP/999` + cell + `@10.31.18.26
MaxRetries: 5
RetryTime: 300
WaitTime: 60
Context: nagios-notify
Extension: s
Archive: Yes
Set: CONTACT_NAME="Gringo"
Set: PLAT_NAME="` + piattaforma + `"
Set: NOT_TYPE="PROBLEM"
Set: HOST_ALIAS="` + hostname + `"
Set: SERVICE_NAME="` + service + `"
Set: STATUS="Critico"
Set: NOT_HEAD_MSG="è stato riscontrato un problema"
Set: SRV_MSG="sul server ` + hostname + ` il servizio ` + service + ` è in critical ` + messaggio + `"`
//dove salavare i file in maniera che asterisk li possa scaricare
//nel nostro caso equivale a dove nginx tiene i contenuti statici del webserver
//le informazioni sono nel file nascosto .sarumann.yaml che l'utente deve avere
//nella propria $HOME
//path := viper.GetString("CallPath")
//file, err := os.Create(path + "exampleTest.call") // Truncates if file already exists, be careful!
file, err := os.Create("/tmp/exampleTest.call")
if err != nil {
log.Fatalf("failed creating file: %s", err)
}
defer file.Close() // Make sure to close the file when you're done
_, err = file.WriteString(scheletro)
if err != nil {
log.Fatalf("failed writing to file: %s", err)
}
//fmt.Printf("\nLength: %d bytes", len)
fmt.Printf("\nFile Name: %s\n", file.Name())
return
}
//verificaCell verifica che il cell sia una stringa di 10 cifre
func verificaCell(value string) (cell string, err error) {
//se value ha meno di 10 cifre non è buono
if len(value) < 10 {
err := fmt.Errorf("Cellulare con poche cifre: %v", len(value))
log.Println(err.Error())
return "", err
}
//cell10cifre prende gli ultimi 10 caratteri del value
cell10cifre := string(value[len(value)-10:])
//test verifica che il valore sia composto da esattamente 10 cifre
test := regexp.MustCompile(`^[0-9]{10}$`)
switch {
case test.MatchString(cell10cifre) == true:
cell = cell10cifre
default:
cell = ""
err = fmt.Errorf("Il cellulare non è corretto")
log.Println(err.Error())
return "", err
}
return
}
| eNotificaNoVoiceCall(w ht | identifier_name |
server.go | // Copyright (C) 2018 by Alberto Bregliano <alberto.bregliano@pm.me>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//Package server provides the ability to forward Nagios Notifications
//to an Asterisk phone sytem.
package server
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"time"
"github.com/spf13/viper"
"github.com/axamon/sauron2/reperibili"
"github.com/gorilla/mux"
)
//Notifica sono le info che si ricevono dai nagios che vengono
//elaborate per creare le chiamate automatiche
type Notifica struct {
//Time time.Time `json:"timestamp,omitempty"`
Hostname string `json:"hostname,omitempty"`
Service string `json:"servizio,omitempty"`
Piattaforma string `json:"piattaforma,omitempty"`
Reperibile string `json:"reperibile,omitempty"`
Cellulare string `json:"cellulare,omitempty"`
Messaggio string `json:"messaggio,omitempty"`
}
//Dettagli non usato al momento ma servirà a gestire le
//risposte del centralino virtuale asterisk
type Dettagli struct {
Info string `json:"info,omitempty"`
State string `json:"state,omitempty"`
}
var people []Notifica
func respondWithError(w http.ResponseWriter, code int, message string) {
respondWithJSON(w, code, map[string]string{"error": message})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
//GetReper recupera il reperibile attuale per la piattaforma
//passata come argomento
func GetReper(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
vars := mux.Vars(r)
piattaforma := vars["piatta"]
reperibile, err := reperibili.GetReperibile(piattaforma)
if err != nil {
respondWithError(w, http.StatusNoContent, err.Error())
return
}
result := fmt.Sprintf("Il reperibile per %s è: %s. Cell: %s", piattaforma, reperibile.Cognome, reperibile.Cellulare)
respondWithJSON(w, http.StatusFound, result)
return
}
//SetReper inserisce reperibilità in un archivio condiviso
func SetReper(w http.ResponseWriter, r *http.Request) {
/* var p reperibili.Contatto
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close() */
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
r.ParseForm()
nome := r.PostFormValue("nome")
cognome := r.PostFormValue("cognome")
cellulare := r.PostFormValue("cellulare")
piattaforma := r.PostFormValue("piattaforma")
oggi := time.Now().Format("20060102")
err := reperibili.AddRuota(nome, cognome, cellulare, piattaforma, oggi, "gruppo6")
if err != nil {
fmt.Println("errorone", err.Error(), cellulare)
return
}
fmt.Println("inserito reperibile: ", nome, cognome, cellulare)
/* err := reperibili.AddRuota(p.Nome, p.Cognome, p.Cellulare, "CDN", "20180101", "gruppo6")
if err != nil {
fmt.Println("errorone")
}
*/
/* fmt.Println(p)
respondWithJSON(w, http.StatusCreated, p) */
return
}
//Callfile sends the file gerated for asterisk
func Callfile(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "/tmp/exampleTest.call")
return
}
//LogNotifica archivia la notifica in un Database
func LogNotifica(n Notifica) (err error) {
//recupera il timestamp di adesso
timestamp := time.Now()
//apre il database
database, _ := sql.Open("sqlite3", "./sarumann.db")
//chiudi Database una volta fatto
defer database.Close()
//prepara la creazione della tabella notifiche se non esite
statement, _ := database.Prepare("CREATE TABLE IF NOT EXISTS notifiche (id INTEGER PRIMARY KEY, server TEXT, servizio TEXT, piattaforma TEXT, reperibile TEXT, cellulare TEXT, messaggio TEXT, timestamp INT)")
//esegue la creazione della tabella notifiche se non esiste già nel database
statement.Exec()
//prepara l'inserimenti della notifica
statement, err = database.Prepare("INSERT INTO notifiche(server, servizio, piattaforma, reperibile, cellulare, messaggio, timestamp) VALUES(?,?,?,?,?,?,?)")
if err != nil {
fmt.Println(err.Error())
}
//esegue l'inserimento della notifica passata come argomento della funzione
_, err = statement.Exec(n.Hostname, n.Service, n.Piattaforma, n.Reperibile, n.Cellulare, n.Messaggio, timestamp.Unix())
if err != nil {
fmt.Println(err.Error())
}
return
}
//AntiStorm evita che il reperibile riceva troppe chiamate
func AntiStorm(piattaforma string) (err error) {
database, _ := sql.Open("sqlite3", "./sarumann.db")
defer database.Close()
row := database.QueryRow("SELECT timestamp FROM notifiche where piattaforma = ? order by timestamp desc limit 1", piattaforma)
var last string
row.Scan(&last)
fmt.Println(last) //debug
lastint, err := strconv.Atoi(last)
if err != nil {
fmt.Println("errore")
}
oraepoch := time.Now().Unix()
fmt.Println(oraepoch) //debug
//Se non sono passati tot secondi dall'ultima notifica allora esce
tot := 1800 ///1800 secondi uguale mezz'ora
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall riceve gli alerts dei nagios
func CreateNotificaNoVoiceCall(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Invia cmq la chiamata se è per la piattaforma CDN
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//Se siamo in fuori orario base
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Verifica che sia passato abbastanza tempo dall'ultima chiamata prima di chiamare nuovamente
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Logga sul db la notifica in entrata
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica riceve gli alerts dei nagios e li utilizza per
//allertare telefonicamente il reperibile in turno
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
| cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
return
}
func isfob(ora time.Time, foborainizio int) (ok bool) {
//ora := time.Now()
giorno := ora.Weekday()
//Partiamo che non siamo in FOB
ok = false
switch giorno {
//Se è sabato siamo in fob
case time.Saturday:
//fmt.Println("E' sabato")
ok = true
//Se è domenica siamo in fob
case time.Sunday:
//fmt.Println("E' Domenica")
ok = true
//Se invece è un giorno feriale dobbiamo vedere l'orario
default:
//se è dopo le 18 siamo in fob
//Si avviso il reperibile mezz'ora prima se è un problema si può cambiare
//Recupero l'ora del FOB dal file di configurazione
if ora.Hour() >= foborainizio {
//fmt.Println("Giorno feriale", viper.GetInt("foborainizio"))
ok = true
return ok
}
//se è prima delle 7 allora siamo in fob
if ora.Hour() < 7 {
ok = true
}
}
//Ritorna ok che sarà true o false a seconda se siamo in FOB o no
return ok
}
//CreateCall crea il file .call che serve ad Asterisk per contattare il reperibile
func CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio string) (err error) {
//Trasforma il campo passato in una stringa di 10 numeri
cell, err := verificaCell(reperibile)
if err != nil {
log.Printf("Cellulare non gestibile: %s\n", err.Error())
return
}
scheletro :=
`Channel: SIP/999` + cell + `@10.31.18.26
MaxRetries: 5
RetryTime: 300
WaitTime: 60
Context: nagios-notify
Extension: s
Archive: Yes
Set: CONTACT_NAME="Gringo"
Set: PLAT_NAME="` + piattaforma + `"
Set: NOT_TYPE="PROBLEM"
Set: HOST_ALIAS="` + hostname + `"
Set: SERVICE_NAME="` + service + `"
Set: STATUS="Critico"
Set: NOT_HEAD_MSG="è stato riscontrato un problema"
Set: SRV_MSG="sul server ` + hostname + ` il servizio ` + service + ` è in critical ` + messaggio + `"`
//dove salavare i file in maniera che asterisk li possa scaricare
//nel nostro caso equivale a dove nginx tiene i contenuti statici del webserver
//le informazioni sono nel file nascosto .sarumann.yaml che l'utente deve avere
//nella propria $HOME
//path := viper.GetString("CallPath")
//file, err := os.Create(path + "exampleTest.call") // Truncates if file already exists, be careful!
file, err := os.Create("/tmp/exampleTest.call")
if err != nil {
log.Fatalf("failed creating file: %s", err)
}
defer file.Close() // Make sure to close the file when you're done
_, err = file.WriteString(scheletro)
if err != nil {
log.Fatalf("failed writing to file: %s", err)
}
//fmt.Printf("\nLength: %d bytes", len)
fmt.Printf("\nFile Name: %s\n", file.Name())
return
}
//verificaCell verifica che il cell sia una stringa di 10 cifre
func verificaCell(value string) (cell string, err error) {
//se value ha meno di 10 cifre non è buono
if len(value) < 10 {
err := fmt.Errorf("Cellulare con poche cifre: %v", len(value))
log.Println(err.Error())
return "", err
}
//cell10cifre prende gli ultimi 10 caratteri del value
cell10cifre := string(value[len(value)-10:])
//test verifica che il valore sia composto da esattamente 10 cifre
test := regexp.MustCompile(`^[0-9]{10}$`)
switch {
case test.MatchString(cell10cifre) == true:
cell = cell10cifre
default:
cell = ""
err = fmt.Errorf("Il cellulare non è corretto")
log.Println(err.Error())
return "", err
}
return
} | random_line_split | |
server.go | // Copyright (C) 2018 by Alberto Bregliano <alberto.bregliano@pm.me>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//Package server provides the ability to forward Nagios Notifications
//to an Asterisk phone sytem.
package server
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"time"
"github.com/spf13/viper"
"github.com/axamon/sauron2/reperibili"
"github.com/gorilla/mux"
)
//Notifica sono le info che si ricevono dai nagios che vengono
//elaborate per creare le chiamate automatiche
type Notifica struct {
//Time time.Time `json:"timestamp,omitempty"`
Hostname string `json:"hostname,omitempty"`
Service string `json:"servizio,omitempty"`
Piattaforma string `json:"piattaforma,omitempty"`
Reperibile string `json:"reperibile,omitempty"`
Cellulare string `json:"cellulare,omitempty"`
Messaggio string `json:"messaggio,omitempty"`
}
//Dettagli non usato al momento ma servirà a gestire le
//risposte del centralino virtuale asterisk
type Dettagli struct {
Info string `json:"info,omitempty"`
State string `json:"state,omitempty"`
}
var people []Notifica
func respondWithError(w http.ResponseWriter, code int, message string) {
respondWithJSON(w, code, map[string]string{"error": message})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
//GetReper recupera il reperibile attuale per la piattaforma
//passata come argomento
func GetReper(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
vars := mux.Vars(r)
piattaforma := vars["piatta"]
reperibile, err := reperibili.GetReperibile(piattaforma)
if err != nil {
respondWithError(w, http.StatusNoContent, err.Error())
return
}
result := fmt.Sprintf("Il reperibile per %s è: %s. Cell: %s", piattaforma, reperibile.Cognome, reperibile.Cellulare)
respondWithJSON(w, http.StatusFound, result)
return
}
//SetReper inserisce reperibilità in un archivio condiviso
func SetReper(w http.ResponseWriter, r *http.Request) {
/* var p reperibili.Contatto
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close() */
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
r.ParseForm()
nome := r.PostFormValue("nome")
cognome := r.PostFormValue("cognome")
cellulare := r.PostFormValue("cellulare")
piattaforma := r.PostFormValue("piattaforma")
oggi := time.Now().Format("20060102")
err := reperibili.AddRuota(nome, cognome, cellulare, piattaforma, oggi, "gruppo6")
if err != nil {
fmt.Println("errorone", err.Error(), cellulare)
return
}
fmt.Println("inserito reperibile: ", nome, cognome, cellulare)
/* err := reperibili.AddRuota(p.Nome, p.Cognome, p.Cellulare, "CDN", "20180101", "gruppo6")
if err != nil {
fmt.Println("errorone")
}
*/
/* fmt.Println(p)
respondWithJSON(w, http.StatusCreated, p) */
return
}
//Callfile sends the file gerated for asterisk
func Callfile(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "/tmp/exampleTest.call")
return
}
//LogNotifica archivia la notifica in un Database
func LogNotifica(n Notifica) (err error) {
//recupera il timestamp di adesso
timestamp := time.Now()
//apre il database
database, _ := sql.Open("sqlite3", "./sarumann.db")
//chiudi Database una volta fatto
defer database.Close()
//prepara la creazione della tabella notifiche se non esite
statement, _ := database.Prepare("CREATE TABLE IF NOT EXISTS notifiche (id INTEGER PRIMARY KEY, server TEXT, servizio TEXT, piattaforma TEXT, reperibile TEXT, cellulare TEXT, messaggio TEXT, timestamp INT)")
//esegue la creazione della tabella notifiche se non esiste già nel database
statement.Exec()
//prepara l'inserimenti della notifica
statement, err = database.Prepare("INSERT INTO notifiche(server, servizio, piattaforma, reperibile, cellulare, messaggio, timestamp) VALUES(?,?,?,?,?,?,?)")
if err != nil {
fmt.Println(err.Error())
}
//esegue l'inserimento della notifica passata come argomento della funzione
_, err = statement.Exec(n.Hostname, n.Service, n.Piattaforma, n.Reperibile, n.Cellulare, n.Messaggio, timestamp.Unix())
if err != nil {
fmt.Println(err.Error())
}
return
}
//AntiStorm evita che il reperibile riceva troppe chiamate
func AntiStorm(piattaforma string) (err error) {
database, _ := sql.Open("sqlite3", "./sarumann.db")
defer database.Close()
row := database.QueryRow("SELECT timestamp FROM notifiche where piattaforma = ? order by timestamp desc limit 1", piattaforma)
var last string
row.Scan(&last)
fmt.Println(last) //debug
lastint, err := strconv.Atoi(last)
if err != nil {
fmt.Println("errore")
}
oraepoch := time.Now().Unix()
fmt.Println(oraepoch) //debug
//Se non sono passati tot secondi dall'ultima notifica allora esce
tot := 1800 ///1800 secondi uguale mezz'ora
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall riceve gli alerts dei nagios
func CreateNotificaNoVoiceCall(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Invia cmq la chiamata se è per la piattaforma CDN
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//Se siamo in fuori orario base
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Verifica che sia passato abbastanza tempo dall'ultima chiamata prima di chiamare nuovamente
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Logga sul db la notifica in entrata
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica riceve gli alerts dei nagios e li utilizza per
//allertare telefonicamente il reperibile in turno
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
return
}
func isfob(ora time.Time, foborainizio int) (ok bool) {
//or | crea il file .call che serve ad Asterisk per contattare il reperibile
func CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio string) (err error) {
//Trasforma il campo passato in una stringa di 10 numeri
cell, err := verificaCell(reperibile)
if err != nil {
log.Printf("Cellulare non gestibile: %s\n", err.Error())
return
}
scheletro :=
`Channel: SIP/999` + cell + `@10.31.18.26
MaxRetries: 5
RetryTime: 300
WaitTime: 60
Context: nagios-notify
Extension: s
Archive: Yes
Set: CONTACT_NAME="Gringo"
Set: PLAT_NAME="` + piattaforma + `"
Set: NOT_TYPE="PROBLEM"
Set: HOST_ALIAS="` + hostname + `"
Set: SERVICE_NAME="` + service + `"
Set: STATUS="Critico"
Set: NOT_HEAD_MSG="è stato riscontrato un problema"
Set: SRV_MSG="sul server ` + hostname + ` il servizio ` + service + ` è in critical ` + messaggio + `"`
//dove salavare i file in maniera che asterisk li possa scaricare
//nel nostro caso equivale a dove nginx tiene i contenuti statici del webserver
//le informazioni sono nel file nascosto .sarumann.yaml che l'utente deve avere
//nella propria $HOME
//path := viper.GetString("CallPath")
//file, err := os.Create(path + "exampleTest.call") // Truncates if file already exists, be careful!
file, err := os.Create("/tmp/exampleTest.call")
if err != nil {
log.Fatalf("failed creating file: %s", err)
}
defer file.Close() // Make sure to close the file when you're done
_, err = file.WriteString(scheletro)
if err != nil {
log.Fatalf("failed writing to file: %s", err)
}
//fmt.Printf("\nLength: %d bytes", len)
fmt.Printf("\nFile Name: %s\n", file.Name())
return
}
//verificaCell verifica che il cell sia una stringa di 10 cifre
func verificaCell(value string) (cell string, err error) {
//se value ha meno di 10 cifre non è buono
if len(value) < 10 {
err := fmt.Errorf("Cellulare con poche cifre: %v", len(value))
log.Println(err.Error())
return "", err
}
//cell10cifre prende gli ultimi 10 caratteri del value
cell10cifre := string(value[len(value)-10:])
//test verifica che il valore sia composto da esattamente 10 cifre
test := regexp.MustCompile(`^[0-9]{10}$`)
switch {
case test.MatchString(cell10cifre) == true:
cell = cell10cifre
default:
cell = ""
err = fmt.Errorf("Il cellulare non è corretto")
log.Println(err.Error())
return "", err
}
return
}
| a := time.Now()
giorno := ora.Weekday()
//Partiamo che non siamo in FOB
ok = false
switch giorno {
//Se è sabato siamo in fob
case time.Saturday:
//fmt.Println("E' sabato")
ok = true
//Se è domenica siamo in fob
case time.Sunday:
//fmt.Println("E' Domenica")
ok = true
//Se invece è un giorno feriale dobbiamo vedere l'orario
default:
//se è dopo le 18 siamo in fob
//Si avviso il reperibile mezz'ora prima se è un problema si può cambiare
//Recupero l'ora del FOB dal file di configurazione
if ora.Hour() >= foborainizio {
//fmt.Println("Giorno feriale", viper.GetInt("foborainizio"))
ok = true
return ok
}
//se è prima delle 7 allora siamo in fob
if ora.Hour() < 7 {
ok = true
}
}
//Ritorna ok che sarà true o false a seconda se siamo in FOB o no
return ok
}
//CreateCall | identifier_body |
server.go | // Copyright (C) 2018 by Alberto Bregliano <alberto.bregliano@pm.me>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//Package server provides the ability to forward Nagios Notifications
//to an Asterisk phone sytem.
package server
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"time"
"github.com/spf13/viper"
"github.com/axamon/sauron2/reperibili"
"github.com/gorilla/mux"
)
//Notifica sono le info che si ricevono dai nagios che vengono
//elaborate per creare le chiamate automatiche
type Notifica struct {
//Time time.Time `json:"timestamp,omitempty"`
Hostname string `json:"hostname,omitempty"`
Service string `json:"servizio,omitempty"`
Piattaforma string `json:"piattaforma,omitempty"`
Reperibile string `json:"reperibile,omitempty"`
Cellulare string `json:"cellulare,omitempty"`
Messaggio string `json:"messaggio,omitempty"`
}
//Dettagli non usato al momento ma servirà a gestire le
//risposte del centralino virtuale asterisk
type Dettagli struct {
Info string `json:"info,omitempty"`
State string `json:"state,omitempty"`
}
var people []Notifica
func respondWithError(w http.ResponseWriter, code int, message string) {
respondWithJSON(w, code, map[string]string{"error": message})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
//GetReper recupera il reperibile attuale per la piattaforma
//passata come argomento
func GetReper(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
vars := mux.Vars(r)
piattaforma := vars["piatta"]
reperibile, err := reperibili.GetReperibile(piattaforma)
if err != nil {
respondWithError(w, http.StatusNoContent, err.Error())
return
}
result := fmt.Sprintf("Il reperibile per %s è: %s. Cell: %s", piattaforma, reperibile.Cognome, reperibile.Cellulare)
respondWithJSON(w, http.StatusFound, result)
return
}
//SetReper inserisce reperibilità in un archivio condiviso
func SetReper(w http.ResponseWriter, r *http.Request) {
/* var p reperibili.Contatto
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close() */
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
r.ParseForm()
nome := r.PostFormValue("nome")
cognome := r.PostFormValue("cognome")
cellulare := r.PostFormValue("cellulare")
piattaforma := r.PostFormValue("piattaforma")
oggi := time.Now().Format("20060102")
err := reperibili.AddRuota(nome, cognome, cellulare, piattaforma, oggi, "gruppo6")
if err != nil {
fmt.Println("errorone", err.Error(), cellulare)
return
}
fmt.Println("inserito reperibile: ", nome, cognome, cellulare)
/* err := reperibili.AddRuota(p.Nome, p.Cognome, p.Cellulare, "CDN", "20180101", "gruppo6")
if err != nil {
fmt.Println("errorone")
}
*/
/* fmt.Println(p)
respondWithJSON(w, http.StatusCreated, p) */
return
}
//Callfile sends the file gerated for asterisk
func Callfile(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "/tmp/exampleTest.call")
return
}
//LogNotifica archivia la notifica in un Database
func LogNotifica(n Notifica) (err error) {
//recupera il timestamp di adesso
timestamp := time.Now()
//apre il database
database, _ := sql.Open("sqlite3", "./sarumann.db")
//chiudi Database una volta fatto
defer database.Close()
//prepara la creazione della tabella notifiche se non esite
statement, _ := database.Prepare("CREATE TABLE IF NOT EXISTS notifiche (id INTEGER PRIMARY KEY, server TEXT, servizio TEXT, piattaforma TEXT, reperibile TEXT, cellulare TEXT, messaggio TEXT, timestamp INT)")
//esegue la creazione della tabella notifiche se non esiste già nel database
statement.Exec()
//prepara l'inserimenti della notifica
statement, err = database.Prepare("INSERT INTO notifiche(server, servizio, piattaforma, reperibile, cellulare, messaggio, timestamp) VALUES(?,?,?,?,?,?,?)")
if err != nil {
fmt.Println(err.Error())
}
//esegue l'inserimento della notifica passata come argomento della funzione
_, err = statement.Exec(n.Hostname, n.Service, n.Piattaforma, n.Reperibile, n.Cellulare, n.Messaggio, timestamp.Unix())
if err != nil {
fmt.Println(err.Error())
}
return
}
//AntiStorm evita che il reperibile riceva troppe chiamate
func AntiStorm(piattaforma string) (err error) {
database, _ := sql.Open("sqlite3", "./sarumann.db")
defer database.Close()
row := database.QueryRow("SELECT timestamp FROM notifiche where piattaforma = ? order by timestamp desc limit 1", piattaforma)
var last string
row.Scan(&last)
fmt.Println(last) //debug
lastint, err := strconv.Atoi(last)
if err != nil {
fmt.Println("errore")
}
oraepoch := time.Now().Unix()
fmt.Println(oraepoch) //debug
//Se non sono passati tot secondi dall'ultima notifica allora esce
tot := 1800 ///1800 secondi uguale mezz'ora
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall riceve gli alerts dei nagios
func CreateNotificaNoVoiceCall(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Invia cmq la chiamata se è per la piattaforma CDN
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//Se siamo in fuori orario base
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Verifica che sia passato abbastanza tempo dall'ultima chiamata prima di chiamare nuovamente
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Logga sul db la notifica in entrata
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica riceve gli alerts dei nagios e li utilizza per
//allertare telefonicamente il reperibile in turno
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
return
}
func isfob(ora time.Time, foborainizio int) (ok bool) {
//ora := time.Now()
giorno := ora.Weekday()
//Partiamo che non siamo in FOB
ok = false
switch giorno {
//Se è sabato siamo in fob
case time.Saturday:
//fmt.Println("E' sabato")
ok = true
//Se è domenica siamo in fob
case time.Sunday:
//fmt.Println("E' Domenica")
ok = true
//Se invece è un giorno feriale dobbiamo vedere l'orario
default:
//se è dopo le 18 siamo in fob
//Si avviso il reperibile mezz'ora prima se è un problema si può cambiare
//Recupero l'ora del FOB dal file di configurazione
if ora.Hour() >= foborainizio {
//fmt.Println("Giorno feriale", viper.GetInt("foborainizio"))
ok = true
return ok
}
//se è prima delle 7 allora siamo in fob
if ora.Hour() < 7 {
ok = true
}
}
//Ritorna ok che sarà true o false a seconda se siamo in FOB o no
return ok
}
//CreateCall crea il file .call che serve ad Asterisk per contattare il reperibile
func CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio string) (err error) {
//Trasforma il campo passato in una stringa di 10 numeri
cell, err := verificaCell(reperibile)
if err != nil {
log.Printf("Cellulare non gestibile: %s\n", err.Error())
return
}
scheletro :=
`Channel: SIP/999` + cell + `@10.31.18.26
MaxRetries: 5
RetryTime: 300
WaitTime: 60
Context: nagios-notify
Extension: s
Archive: Yes
Set: CONTACT_NAME="Gringo"
Set: PLAT_NAME="` + piattaforma + `"
Set: NOT_TYPE="PROBLEM"
Set: HOST_ALIAS="` + hostname + `"
Set: SERVICE_NAME="` + service + `"
Set: STATUS="Critico"
Set: NOT_HEAD_MSG="è stato riscontrato un problema"
Set: SRV_MSG="sul server ` + hostname + ` il servizio ` + service + ` è in critical ` + messaggio + `"`
//dove salavare i file in maniera che asterisk li possa scaricare
//nel nostro caso equivale a dove nginx tiene i contenuti statici del webserver
//le informazioni sono nel file nascosto .sarumann.yaml che l'utente deve avere
//nella propria $HOME
//path := viper.GetString("CallPath")
//file, err := os.Create(path + "exampleTest.call") // Truncates if file already exists, be careful!
file, err := os.Create("/tmp/exampleTest.call")
if err != nil {
log.Fatalf("failed creating file: %s", err)
}
defer file.Close() // Make sure to close the file when you're done
_, err = file.WriteString(scheletro)
if err != nil {
log.Fatalf("failed writing to file: %s", err)
}
//fmt.Printf("\nLength: %d bytes", len)
fmt.Printf("\nFile Name: %s\n", file.Name())
return
}
//verificaCell verifica che il cell sia una stringa di 10 cifre
func verificaCell(value string) (cell string, err error) {
//se value ha meno di 10 cifre non è buono
if len(value) < 10 {
err := fmt.Err | ende gli ultimi 10 caratteri del value
cell10cifre := string(value[len(value)-10:])
//test verifica che il valore sia composto da esattamente 10 cifre
test := regexp.MustCompile(`^[0-9]{10}$`)
switch {
case test.MatchString(cell10cifre) == true:
cell = cell10cifre
default:
cell = ""
err = fmt.Errorf("Il cellulare non è corretto")
log.Println(err.Error())
return "", err
}
return
}
| orf("Cellulare con poche cifre: %v", len(value))
log.Println(err.Error())
return "", err
}
//cell10cifre pr | conditional_block |
mod.rs | //! # Ready-to-use NLP pipelines and models
//!
//! Based on Huggingface's pipelines, ready to use end-to-end NLP pipelines are available as part of this crate. The following capabilities are currently available:
//!
//! **Disclaimer**
//! The contributors of this repository are not responsible for any generation from the 3rd party utilization of the pretrained systems proposed herein.
//!
//! #### 1. Question Answering
//! Extractive question answering from a given question and context. DistilBERT model finetuned on SQuAD (Stanford Question Answering Dataset)
//!
//! ```ignore
//! use rust_bert::pipelines::question_answering::{QaInput, QuestionAnsweringModel};
//! # fn main() -> anyhow::Result<()> {
//! let qa_model = QuestionAnsweringModel::new(Default::default())?;
//!
//! let question = String::from("Where does Amy live ?");
//! let context = String::from("Amy lives in Amsterdam");
//!
//! let answers = qa_model.predict(&[QaInput { question, context }], 1, 32);
//! # Ok(())
//! # }
//! ```
//!
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::question_answering::Answer;
//! # let output =
//! [Answer {
//! score: 0.9976,
//! start: 13,
//! end: 21,
//! answer: String::from("Amsterdam"),
//! }]
//! # ;
//! ```
//!
//! #### 2. Translation
//! Translation using the MarianMT architecture and pre-trained models from the Opus-MT team from Language Technology at the University of Helsinki.
//! Currently supported languages are :
//! - English <-> French
//! - English <-> Spanish
//! - English <-> Portuguese
//! - English <-> Italian
//! - English <-> Catalan
//! - English <-> German
//! - English <-> Russian
//! - English <-> Chinese (Simplified)
//! - English <-> Chinese (Traditional)
//! - English <-> Dutch
//! - English <-> Swedish
//! - English <-> Arabic
//! - English <-> Hebrew
//! - English <-> Hindi
//! - French <-> German
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! # use rust_bert::pipelines::generation_utils::LanguageGenerator;
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::translation::{
//! Language, TranslationConfig, TranslationModel, TranslationModelBuilder,
//! };
//! use tch::Device;
//! let model = TranslationModelBuilder::new()
//! .with_device(Device::cuda_if_available())
//! .with_model_type(ModelType::Marian)
//! .with_source_languages(vec![Language::English])
//! .with_target_languages(vec![Language::French])
//! .create_model()?;
//!
//! let input = ["This is a sentence to be translated"];
//!
//! let output = model.translate(&input, None, Language::French);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```ignore
//! # let output =
//! " Il s'agit d'une phrase à traduire"
//! # ;
//! ```
//!
//! Output: \
//! ```ignore
//! # let output =
//! "Il s'agit d'une phrase à traduire"
//! # ;
//! ```
//!
//! #### 3. Summarization
//! Abstractive summarization of texts based on the BART encoder-decoder architecture
//! Include techniques such as beam search, top-k and nucleus sampling, temperature setting and repetition penalty.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! # use rust_bert::pipelines::generation_utils::LanguageGenerator;
//! use rust_bert::pipelines::summarization::SummarizationModel;
//!
//! let mut model = SummarizationModel::new(Default::default())?;
//!
//! let input = ["In findings published Tuesday in Cornell University's arXiv by a team of scientists
//! from the University of Montreal and a separate report published Wednesday in Nature Astronomy by a team
//! from University College London (UCL), the presence of water vapour was confirmed in the atmosphere of K2-18b,
//! a planet circling a star in the constellation Leo. This is the first such discovery in a planet in its star's
//! habitable zone — not too hot and not too cold for liquid water to exist. The Montreal team, led by Björn Benneke,
//! used data from the NASA's Hubble telescope to assess changes in the light coming from K2-18b's star as the planet
//! passed between it and Earth. They found that certain wavelengths of light, which are usually absorbed by water,
//! weakened when the planet was in the way, indicating not only does K2-18b have an atmosphere, but the atmosphere
//! contains water in vapour form. The team from UCL then analyzed the Montreal team's data using their own software
//! and confirmed their conclusion. This was not the first time scientists have found signs of water on an exoplanet,
//! but previous discoveries were made on planets with high temperatures or other pronounced differences from Earth.
//! \"This is the first potentially habitable planet where the temperature is right and where we now know there is water,\"
//! said UCL astronomer Angelos Tsiaras. \"It's the best candidate for habitability right now.\" \"It's a good sign\",
//! said Ryan Cloutier of the Harvard–Smithsonian Center for Astrophysics, who was not one of either study's authors.
//! \"Overall,\" he continued, \"the presence of water in its atmosphere certainly improves the prospect of K2-18b being
//! a potentially habitable planet, but further observations will be required to say for sure. \"
//! K2-18b was first identified in 2015 by the Kepler space telescope. It is about 110 light-years from Earth and larger
//! but less dense. Its star, a red dwarf, is cooler than the Sun, but the planet's orbit is much closer, such that a year
//! on K2-18b lasts 33 Earth days. According to The Guardian, astronomers were optimistic that NASA's James Webb space
//! telescope — scheduled for launch in 2021 — and the European Space Agency's 2028 ARIEL program, could reveal more
//! about exoplanets like K2-18b."];
//!
//! let output = model.summarize(&input);
//! # Ok(())
//! # }
//! ```
//! (example from: [WikiNews](https://en.wikinews.org/wiki/Astronomers_find_water_vapour_in_atmosphere_of_exoplanet_K2-18b))
//!
//! Example output: \
//! ```ignore
//! # let output =
//! "Scientists have found water vapour on K2-18b, a planet 110 light-years from Earth.
//! This is the first such discovery in a planet in its star's habitable zone.
//! The planet is not too hot and not too cold for liquid water to exist."
//! # ;
//! ```
//!
//!
//! #### 4. Dialogue Model
//! Conversation model based on Microsoft's [DialoGPT](https://github.com/microsoft/DialoGPT).
//! This pipeline allows the generation of single or multi-turn conversations between a human and a model.
//! The DialoGPT's page states that
//! > The human evaluation results indicate that the response generated from DialoGPT is comparable to human response quality
//! > under a single-turn conversation Turing test. ([DialoGPT repository](https://github.com/microsoft/DialoGPT))
//!
//! The model uses a `ConversationManager` to keep track of active conversations and generate responses to them.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::conversation::{ConversationManager, ConversationModel};
//! let conversation_model = ConversationModel::new(Default::default())?;
//! let mut conversation_manager = ConversationManager::new();
//!
//! let conversation_id =
//! conversation_manager.create("Going to the movies tonight - any suggestions?");
//! let output = conversation_model.generate_responses(&mut conversation_manager);
//! # Ok(())
//! # }
//! ```
//! Example output: \
//! ```ignore
//! # let output =
//! "The Big Lebowski."
//! # ;
//! ```
//!
//! #### 5. Natural Language Generation
//! Generate language based on a prompt. GPT2 and GPT available as base models.
//! Include techniques such as beam search, top-k and nucleus sampling, temperature setting and repetition penalty.
//! Supports batch generation of sentences from several prompts. Sequences will be left-padded with the model's padding token if present, the unknown token otherwise.
//! This may impact the results and it is recommended to submit prompts of similar length for best results. Additional information on the input parameters for generation is provided in this module's documentation.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::text_generation::TextGenerationModel;
//! use rust_bert::pipelines::common::ModelType;
//! let mut model = TextGenerationModel::new(Default::default())?;
//! let input_context_1 = "The dog";
//! let input_context_2 = "The cat was";
//!
//! let prefix = None; // Optional prefix to append prompts with, will be excluded from the generated output
//!
//! let output = model.generate(&[input_context_1, input_context_2], prefix);
//! # Ok(())
//! # }
//! ```
//! Example output: \
//! ```ignore
//! # let output =
//! [
//! "The dog's owners, however, did not want to be named. According to the lawsuit, the animal's owner, a 29-year",
//! "The dog has always been part of the family. \"He was always going to be my dog and he was always looking out for me",
//! "The dog has been able to stay in the home for more than three months now. \"It's a very good dog. She's",
//! "The cat was discovered earlier this month in the home of a relative of the deceased. The cat\'s owner, who wished to remain anonymous,",
//! "The cat was pulled from the street by two-year-old Jazmine.\"I didn't know what to do,\" she said",
//! "The cat was attacked by two stray dogs and was taken to a hospital. Two other cats were also injured in the attack and are being treated."
//! ]
//! # ;
//! ```
//!
//! #### 6. Zero-shot classification
//! Performs zero-shot classification on input sentences with provided labels using a model fine-tuned for Natural Language Inference.
//! ```ignore
//! # use rust_bert::pipelines::zero_shot_classification::ZeroShotClassificationModel;
//! # fn main() -> anyhow::Result<()> {
//! let sequence_classification_model = ZeroShotClassificationModel::new(Default::default())?;
//! let input_sentence = "Who are you voting for in 2020?";
//! let input_sequence_2 = "The prime minister has announced a stimulus package which was widely criticized by the opposition.";
//! let candidate_labels = &["politics", "public health", "economics", "sports"];
//! let output = sequence_classification_model.predict_multilabel(
//! &[input_sentence, input_sequence_2],
//! candidate_labels,
//! None,
//! 128,
//! ); | //! ```ignore
//! # use rust_bert::pipelines::sequence_classification::Label;
//! let output = [
//! [
//! Label {
//! text: "politics".to_string(),
//! score: 0.972,
//! id: 0,
//! sentence: 0,
//! },
//! Label {
//! text: "public health".to_string(),
//! score: 0.032,
//! id: 1,
//! sentence: 0,
//! },
//! Label {
//! text: "economics".to_string(),
//! score: 0.006,
//! id: 2,
//! sentence: 0,
//! },
//! Label {
//! text: "sports".to_string(),
//! score: 0.004,
//! id: 3,
//! sentence: 0,
//! },
//! ],
//! [
//! Label {
//! text: "politics".to_string(),
//! score: 0.975,
//! id: 0,
//! sentence: 1,
//! },
//! Label {
//! text: "economics".to_string(),
//! score: 0.852,
//! id: 2,
//! sentence: 1,
//! },
//! Label {
//! text: "public health".to_string(),
//! score: 0.0818,
//! id: 1,
//! sentence: 1,
//! },
//! Label {
//! text: "sports".to_string(),
//! score: 0.001,
//! id: 3,
//! sentence: 1,
//! },
//! ],
//! ]
//! .to_vec();
//! ```
//!
//! #### 7. Sentiment analysis
//! Predicts the binary sentiment for a sentence. DistilBERT model finetuned on SST-2.
//! ```ignore
//! use rust_bert::pipelines::sentiment::SentimentModel;
//! # fn main() -> anyhow::Result<()> {
//! let sentiment_model = SentimentModel::new(Default::default())?;
//! let input = [
//! "Probably my all-time favorite movie, a story of selflessness, sacrifice and dedication to a noble cause, but it's not preachy or boring.",
//! "This film tried to be too many things all at once: stinging political satire, Hollywood blockbuster, sappy romantic comedy, family values promo...",
//! "If you like original gut wrenching laughter you will like this movie. If you are young or old then you will love this movie, hell even my mom liked it.",
//! ];
//! let output = sentiment_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! (Example courtesy of [IMDb](http://www.imdb.com))
//!
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::sentiment::Sentiment;
//! # use rust_bert::pipelines::sentiment::SentimentPolarity::{Positive, Negative};
//! # let output =
//! [
//! Sentiment {
//! polarity: Positive,
//! score: 0.998,
//! },
//! Sentiment {
//! polarity: Negative,
//! score: 0.992,
//! },
//! Sentiment {
//! polarity: Positive,
//! score: 0.999,
//! },
//! ]
//! # ;
//! ```
//!
//! #### 8. Named Entity Recognition
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text. The default NER mode is an English BERT cased large model finetuned on CoNNL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! Additional pre-trained models are available for English, German, Spanish and Dutch.
//! ```ignore
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! #### 9. Keywords/Keyphrases extraction
//!
//! Extract keywords and keyphrases extractions from input documents. Based on a sentence embedding model
//! to compute the semantic similarity between the full text and word n-grams composing it.
//!
//!```no_run
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::keywords_extraction::KeywordExtractionModel;
//! let keyword_extraction_model = KeywordExtractionModel::new(Default::default())?;
//!
//! let input = "Rust is a multi-paradigm, general-purpose programming language. \
//! Rust emphasizes performance, type safety, and concurrency. Rust enforces memory safety—that is, \
//! that all references point to valid memory—without requiring the use of a garbage collector or \
//! reference counting present in other memory-safe languages. To simultaneously enforce \
//! memory safety and prevent concurrent data races, Rust's borrow checker tracks the object lifetime \
//! and variable scope of all references in a program during compilation. Rust is popular for \
//! systems programming but also offers high-level features including functional programming constructs.";
//! // Credits: Wikimedia https://en.wikipedia.org/wiki/Rust_(programming_language)
//! let output = keyword_extraction_model.predict(&[input])?;
//! Ok(())
//! }
//! ```
//! Output:
//! ```no_run
//! # let output =
//! [
//! ("rust", 0.50910604),
//! ("concurrency", 0.33825397),
//! ("languages", 0.28515345),
//! ("compilation", 0.2801403),
//! ("safety", 0.2657791),
//! ]
//! # ;
//! ```
//!
//! #### 10. Part of Speech tagging
//! Extracts Part of Speech tags (Noun, Verb, Adjective...) from text.
//! ```ignore
//! use rust_bert::pipelines::pos_tagging::POSModel;
//! # fn main() -> anyhow::Result<()> {
//! let pos_model = POSModel::new(Default::default())?;
//! let input = ["My name is Bob"];
//! let output = pos_model.encode_as_tensor(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::pos_tagging::POSTag;
//! # let output =
//! [
//! POSTag {
//! word: String::from("My"),
//! score: 0.1560,
//! label: String::from("PRP"),
//! },
//! POSTag {
//! word: String::from("name"),
//! score: 0.6565,
//! label: String::from("NN"),
//! },
//! POSTag {
//! word: String::from("is"),
//! score: 0.3697,
//! label: String::from("VBZ"),
//! },
//! POSTag {
//! word: String::from("Bob"),
//! score: 0.7460,
//! label: String::from("NNP"),
//! },
//! ]
//! # ;
//! ```
//!
//! #### 11. Sentence embeddings
//!
//! Generate sentence embeddings (vector representation). These can be used for applications including dense information retrieval.
//!```ignore
//! # use rust_bert::pipelines::sentence_embeddings::{SentenceEmbeddingsBuilder, SentenceEmbeddingsModelType};
//! # fn main() -> anyhow::Result<()> {
//! let model = SentenceEmbeddingsBuilder::remote(
//! SentenceEmbeddingsModelType::AllMiniLmL12V2
//! ).create_model()?;
//!
//! let sentences = [
//! "this is an example sentence",
//! "each sentence is converted"
//! ];
//!
//! let output = model.encode(&sentences);
//! # Ok(())
//! # }
//! ```
//! Output:
//! ```ignore
//! # let output =
//! [
//! [-0.000202666, 0.08148022, 0.03136178, 0.002920636],
//! [0.064757116, 0.048519745, -0.01786038, -0.0479775],
//! ]
//! # ;
//! ```
//!
//! # [Tokenizers](https://github.com/huggingface/tokenizers) support
//!
//! The pipelines support both the default [rust-tokenizers](https://github.com/guillaume-be/rust-tokenizers) and
//! Hugging Face's [Tokenizers](https://github.com/huggingface/tokenizers) library. In order to use the latter,
//! the tokenizer needs to be created manually and passed as an argument to the pipeline's `new_with_tokenizer` method.
//!
//! Note that the `special_token_maps` is required to create a `TokenizerOption` from a HFTokenizer. This file is sometimes not provided
//! (the Python Transformers library provides the special token map information as part of the actual tokenizer loaded wrapping the rust-based
//! tokenizer). If that is the case a temporary file with the special token map information can be created as illustrated below:
//! ```no_run
//! fn main() -> anyhow::Result<()> {
//! use std::fs::File;
//! use std::io::Write;
//! use tempfile::TempDir;
//! use rust_bert::pipelines::common::{ModelType, TokenizerOption};
//! use rust_bert::pipelines::text_generation::{TextGenerationConfig, TextGenerationModel};
//! use rust_bert::resources::{RemoteResource, ResourceProvider};
//!
//! let generate_config = TextGenerationConfig {
//! model_type: ModelType::GPT2,
//! ..Default::default()
//! };
//!
//! // Create tokenizer
//! let tmp_dir = TempDir::new()?;
//! let special_token_map_path = tmp_dir.path().join("special_token_map.json");
//! let mut tmp_file = File::create(&special_token_map_path)?;
//! writeln!(
//! tmp_file,
//! r#"{{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}}"#
//! )?;
//! let tokenizer_path = RemoteResource::from_pretrained((
//! "gpt2/tokenizer",
//! "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
//! )).get_local_path()?;
//! let tokenizer =
//! TokenizerOption::from_hf_tokenizer_file(tokenizer_path, special_token_map_path)?;
//!
//! // Create model
//! let model = TextGenerationModel::new_with_tokenizer(generate_config, tokenizer)?;
//!
//! let input_context = "The dog";
//! let output = model.generate(&[input_context], None);
//! for sentence in output {
//! println!("{sentence:?}");
//! }
//! Ok(())
//! }
//! ```
pub mod common;
pub mod conversation;
pub mod generation_utils;
pub mod keywords_extraction;
pub mod masked_language;
pub mod ner;
pub mod pos_tagging;
pub mod question_answering;
pub mod sentence_embeddings;
pub mod sentiment;
pub mod sequence_classification;
pub mod summarization;
pub mod text_generation;
pub mod token_classification;
pub mod translation;
pub mod zero_shot_classification;
#[cfg(feature = "onnx")]
pub mod onnx;
#[cfg(feature = "hf-tokenizers")]
pub mod hf_tokenizers; | //! # Ok(())
//! # }
//! ```
//!
//! outputs: | random_line_split |
backfill.py | from __future__ import annotations
import asyncio
from collections import Counter
from functools import partial
import itertools
import typing
from typing import (
AsyncIterator,
Dict,
Iterable,
NamedTuple,
Optional,
Set,
Tuple,
)
from async_service import Service
from eth.abc import (
AtomicDatabaseAPI,
BlockHeaderAPI,
)
from eth.constants import EMPTY_SHA3
from eth.rlp.accounts import Account
from eth_typing import Hash32
import rlp
from trie import (
HexaryTrie,
exceptions as trie_exceptions,
fog,
)
from trie.constants import (
BLANK_NODE_HASH,
)
from trie.utils.nibbles import (
bytes_to_nibbles,
)
from trie.utils.nodes import (
key_starts_with,
)
from trie.typing import (
HexaryTrieNode,
Nibbles,
)
from p2p.exceptions import BaseP2PError, PeerConnectionLost
from trinity.protocol.eth.peer import ETHPeer, ETHPeerPool
from trinity.sync.beam.constants import (
EPOCH_BLOCK_LENGTH,
GAP_BETWEEN_TESTS,
NON_IDEAL_RESPONSE_PENALTY,
PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED,
)
from trinity._utils.async_iter import async_take
from trinity._utils.logging import get_logger
from trinity._utils.timer import Timer
from .queen import (
QueeningQueue,
QueenTrackerAPI,
)
REQUEST_SIZE = 16
class BeamStateBackfill(Service, QueenTrackerAPI):
"""
Use a very simple strategy to fill in state in the background.
Ask each peer in sequence for some nodes, ignoring the lowest RTT node.
Reduce memory pressure by using a depth-first strategy.
An intended side-effect is to build & maintain an accurate measurement of
the round-trip-time that peers take to respond to GetNodeData commands.
"""
_total_added_nodes = 0
_num_added = 0
_num_missed = 0
_num_accounts_completed = 0
_num_storage_completed = 0
_report_interval = 10
_num_requests_by_peer: typing.Counter[ETHPeer]
def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool) -> None:
self.logger = get_logger('trinity.sync.beam.backfill.BeamStateBackfill')
self._db = db
self._peer_pool = peer_pool
self._is_missing: Set[Hash32] = set()
self._num_requests_by_peer = Counter()
self._queening_queue = QueeningQueue(peer_pool)
# Track the nodes that we are requesting in the account trie
self._account_tracker = TrieNodeRequestTracker()
self._storage_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
self._bytecode_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
# The most recent root hash to use to navigate the trie
self._next_trie_root_hash: Optional[Hash32] = None
self._begin_backfill = asyncio.Event()
async def get_queen_peer(self) -> ETHPeer:
return await self._queening_queue.get_queen_peer()
def penalize_queen(self, peer: ETHPeer, delay: float = NON_IDEAL_RESPONSE_PENALTY) -> None:
self._queening_queue.penalize_queen(peer, delay=delay)
async def run(self) -> None:
self.manager.run_daemon_task(self._periodically_report_progress)
queening_manager = self.manager.run_daemon_child_service(self._queening_queue)
await queening_manager.wait_started()
await self._run_backfill()
self.manager.cancel()
async def _run_backfill(self) -> None:
await self._begin_backfill.wait()
if self._next_trie_root_hash is None:
raise RuntimeError("Cannot start backfill when a recent trie root hash is unknown")
while self.manager.is_running:
peer = await self._queening_queue.pop_fastest_peasant()
# collect node hashes that might be missing
required_data = tuple([
request async for request in async_take(REQUEST_SIZE, self._missing_trie_hashes())
])
if len(required_data) == 0:
# Nothing available to request, for one of two reasons:
if self._check_complete():
self.logger.info("Downloaded all accounts, storage and bytecode state")
return
else:
# There are active requests to peers, and we don't have enough information to
# ask for any more trie nodes (for example, near the beginning, when the top
# of the trie isn't available).
self._queening_queue.readd_peasant(peer)
self.logger.debug(
"Backfill is waiting for more hashes to arrive, putting %s back in queue",
peer,
)
await asyncio.sleep(PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED)
continue
self.manager.run_task(self._make_request, peer, required_data)
def _check_complete(self) -> bool:
if self._account_tracker.is_complete:
storage_complete = all(
storage_tracker.is_complete
for storage_tracker in self._storage_trackers.values()
)
if storage_complete:
bytecode_complete = all(
bytecode_tracker.is_complete
for bytecode_tracker in self._bytecode_trackers.values()
)
# All backfill is complete only if the account and storage and bytecodes are present
return bytecode_complete
else:
# At least one account is missing a storage trie node
return False
else:
# At least one account trie node is missing
return False
async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:
"""
Walks through the full state trie, yielding one missing node hash/prefix
at a time.
The yielded node info is wrapped in a TrackedRequest. The hash is
marked as active until it is explicitly marked for review again. The
hash/prefix will be marked for review asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
# For each account, when we have asked for all known storage and bytecode
# hashes, but some are still not present, we "pause" the account so we can look
# for neighboring nodes.
# This is a list of paused accounts, using the path to the leaf node,
# because that's how the account tracker is indexed.
exhausted_account_leaves: Tuple[Nibbles, ...] = ()
starting_root_hash = self._next_trie_root_hash
try:
while self.manager.is_running:
# Get the next account
# We have to rebuild the account iterator every time because...
# something about an exception during a manual __anext__()?
account_iterator = self._request_tracking_trie_items(
self._account_tracker,
starting_root_hash,
)
try:
next_account_info = await account_iterator.__anext__()
except trie_exceptions.MissingTraversalNode as exc:
# Found a missing trie node while looking for the next account
yield self._account_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
continue
except StopAsyncIteration:
# Finished iterating over all available accounts
break
# Decode account
path_to_leaf, address_hash_nibbles, encoded_account = next_account_info
account = rlp.decode(encoded_account, sedes=Account)
# Iterate over all missing hashes of subcomponents (storage & bytecode)
subcomponent_hashes_iterator = self._missing_subcomponent_hashes(
address_hash_nibbles,
account,
starting_root_hash,
)
async for node_request in subcomponent_hashes_iterator:
yield node_request
# Check if account is fully downloaded
account_components_complete = self._are_account_components_complete(
address_hash_nibbles,
account,
)
if account_components_complete:
# Mark fully downloaded accounts as complete, and do some cleanup
self._mark_account_complete(path_to_leaf, address_hash_nibbles)
else:
# Pause accounts that are not fully downloaded, and track the account
# to resume when the generator exits.
self._account_tracker.pause_review(path_to_leaf)
exhausted_account_leaves += (path_to_leaf, )
except GeneratorExit:
# As the generator is exiting, we want to resume any paused accounts. This
# allows us to find missing storage/bytecode on the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
raise
else:
# If we pause a few accounts and then run out of nodes to ask for, then we
# still need to resume the paused accounts to prepare for the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
# Possible scenarios:
# 1. We have completed backfill
# 2. We have iterated the available nodes, and all known hashes are being requested.
# For example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes, and exit cleanly.
#
# In response to these situations, we might like to:
# 1. Log and celebrate that the full state has been downloaded
# 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it, using a _check_complete() check.
return
async def _request_tracking_trie_items(
self,
request_tracker: TrieNodeRequestTracker,
root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:
"""
Walk through the supplied trie, yielding the request tracker and node
request for any missing trie nodes.
:yield: path to leaf node, a key (as nibbles), and the value found in the trie
:raise: MissingTraversalNode if a node is missing while walking the trie
"""
if self._next_trie_root_hash is None:
# We haven't started beam syncing, so don't know which root to start at
return
trie = HexaryTrie(self._db, root_hash)
starting_index = bytes_to_nibbles(root_hash)
while self.manager.is_running:
try:
path_to_node = request_tracker.next_path_to_explore(starting_index)
except trie_exceptions.PerfectVisibility:
# This doesn't necessarily mean we are finished.
# Any active prefixes might still be hiding some significant portion of the trie
# But it's all we're able to explore for now, until more node data arrives
return
try:
cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)
except KeyError:
cached_node = None
node_getter = partial(trie.traverse, path_to_node)
else:
node_getter = partial(trie.traverse_from, cached_node, uncached_key)
try:
node = node_getter()
except trie_exceptions.MissingTraversalNode as exc:
# Found missing account trie node
if path_to_node == exc.nibbles_traversed:
raise
elif cached_node is None:
# The path and nibbles traversed should always match in a non-cached traversal
raise RuntimeError(
f"Unexpected: on a non-cached traversal to {path_to_node}, the"
f" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}"
) from exc
else:
# We need to re-raise a version of the exception that includes the whole path
# from the root node (when using cached nodes, we only have the path from
# the parent node to the child node)
# We could always raise this re-wrapped version, but skipping it (probably?)
# improves performance.
missing_hash = exc.missing_node_hash
raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc
except trie_exceptions.TraversedPartialPath as exc:
node = exc.simulated_node
if node.value:
full_key_nibbles = path_to_node + node.suffix
if len(node.sub_segments):
# It shouldn't be a problem to skip handling this case, because all keys are
# hashed 32 bytes.
raise NotImplementedError(
"The state backfiller doesn't handle keys of different lengths, where"
f" one key is a prefix of another. But found {node} in trie with"
f" {root_hash!r}"
)
yield path_to_node, full_key_nibbles, node.value
# Note that we do not mark value nodes as completed. It is up to the caller
# to do that when it is ready. For example, the storage iterator will
# immediately treat the key as completed. The account iterator will
# not treat the key as completed until all of its storage and bytecode
# are also marked as complete.
else:
# If this is just an intermediate node, then we can mark it as confirmed.
request_tracker.confirm_prefix(path_to_node, node)
async def _missing_subcomponent_hashes(
self,
address_hash_nibbles: Nibbles,
account: Account,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
storage_node_iterator = self._missing_storage_hashes(
address_hash_nibbles,
account.storage_root,
starting_main_root,
)
async for node_request in storage_node_iterator:
yield node_request
bytecode_node_iterator = self._missing_bytecode_hashes(
address_hash_nibbles,
account.code_hash,
starting_main_root,
)
async for node_request in bytecode_node_iterator:
yield node_request
# Note that completing this iterator does NOT mean we're done with the
# account. It just means that all known missing hashes are actively
# being requested.
async def _missing_storage_hashes(
self,
address_hash_nibbles: Nibbles,
storage_root: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Walks through the storage trie at the given root, yielding one missing
storage node hash/prefix at a time.
The yielded node info is wrapped in a ``TrackedRequest``. The hash is
marked as active until it is explicitly marked for review again. The
hash/prefix will be marked for review asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
if storage_root == BLANK_NODE_HASH:
# Nothing to do if the storage has an empty root
return
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
while self.manager.is_running:
storage_iterator = self._request_tracking_trie_items(
storage_tracker,
storage_root,
)
try:
async for path_to_leaf, hashed_key, _storage_value in storage_iterator:
# We don't actually care to look at the storage keys/values during backfill
storage_tracker.confirm_leaf(path_to_leaf)
except trie_exceptions.MissingTraversalNode as exc:
yield storage_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
else:
# Possible scenarios:
# 1. We have completed backfilling this account's storage
# 2. We have iterated the available nodes, and only their children are missing,
# for example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes. | # 2. Look for more missing nodes in neighboring accounts and their storage, etc.
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it.
return
async def _missing_bytecode_hashes(
self,
address_hash_nibbles: Nibbles,
code_hash: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Checks if this bytecode is missing. If so, yield it and then exit.
If not, then exit immediately.
This may seem like overkill, and it is right now. But...
Code merkelization is coming (theoretically), and the other account
and storage trie iterators work similarly to this, so in some ways
it's easier to do this "over-generalized" solution now. It makes
request tracking a bit easier too, to have the same TrackedRequest
result mechanism.
"""
if code_hash == EMPTY_SHA3:
# Nothing to do if the bytecode is for the empty hash
return
bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
if bytecode_tracker.is_complete:
# All bytecode has been collected
return
# If there is an active request (for now, there can only be one), then skip
# any database checks until the active request is resolved.
if not bytecode_tracker.has_active_requests:
if code_hash not in self._db:
# The bytecode isn't present, so we ask for it.
# A bit hacky here, since there is no trie, we just treat it as
# if it were a leaf node at the root.
yield bytecode_tracker.generate_request(code_hash, prefix=())
else:
# The bytecode is already present, but the tracker isn't marked
# as completed yet, so finish it off.
bytecode_tracker.confirm_leaf(path_to_leaf=())
def _get_storage_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._storage_trackers:
return self._storage_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._storage_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _get_bytecode_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._bytecode_trackers:
return self._bytecode_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._bytecode_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _mark_account_complete(self, path_to_leaf: Nibbles, address_hash_nibbles: Nibbles) -> None:
self._account_tracker.confirm_leaf(path_to_leaf)
self._num_accounts_completed += 1
# Clear the storage tracker, to reduce memory usage
# and the time to check self._check_complete()
if address_hash_nibbles in self._storage_trackers:
self._num_storage_completed += 1
del self._storage_trackers[address_hash_nibbles]
# Clear the bytecode tracker, for the same reason
if address_hash_nibbles in self._bytecode_trackers:
del self._bytecode_trackers[address_hash_nibbles]
def _are_account_components_complete(
self,
address_hash_nibbles: Nibbles,
account: Account) -> bool:
if account.storage_root != BLANK_NODE_HASH:
# Avoid generating a storage tracker if there is no storage for this account
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
if account.storage_root == BLANK_NODE_HASH or storage_tracker.is_complete:
if account.code_hash == EMPTY_SHA3:
# All storage is downloaded, and no bytecode to download
return True
else:
bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
# All storage is downloaded, return True only if bytecode is downloaded
return bytecode_tracker.is_complete
else:
# Missing some storage
return False
async def _make_request(
self,
peer: ETHPeer,
request_data: Iterable[TrackedRequest]) -> None:
self._num_requests_by_peer[peer] += 1
request_hashes = tuple(set(request.node_hash for request in request_data))
try:
nodes = await peer.eth_api.get_node_data(request_hashes)
except asyncio.TimeoutError:
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
except PeerConnectionLost:
# Something unhappy, but we don't really care, peer will be gone by next loop
pass
except (BaseP2PError, Exception) as exc:
self.logger.info("Unexpected err while getting background nodes from %s: %s", peer, exc)
self.logger.debug("Problem downloading background nodes from peer...", exc_info=True)
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
else:
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS)
self._insert_results(request_hashes, nodes)
finally:
for request in request_data:
request.tracker.mark_for_review(request.prefix)
def _insert_results(
self,
requested_hashes: Tuple[Hash32, ...],
nodes: Tuple[Tuple[Hash32, bytes], ...]) -> None:
returned_nodes = dict(nodes)
with self._db.atomic_batch() as write_batch:
for requested_hash in requested_hashes:
if requested_hash in returned_nodes:
self._num_added += 1
self._total_added_nodes += 1
encoded_node = returned_nodes[requested_hash]
write_batch[requested_hash] = encoded_node
else:
self._num_missed += 1
def set_root_hash(self, header: BlockHeaderAPI, root_hash: Hash32) -> None:
if self._next_trie_root_hash is None:
self._next_trie_root_hash = root_hash
self._begin_backfill.set()
elif header.block_number % EPOCH_BLOCK_LENGTH == 1:
# This is the root hash of the *parent* of the header, so use modulus equals 1
self._next_trie_root_hash = root_hash
async def _periodically_report_progress(self) -> None:
for step in itertools.count():
if not self.manager.is_running:
break
self._num_added = 0
self._num_missed = 0
timer = Timer()
await asyncio.sleep(self._report_interval)
if not self._begin_backfill.is_set():
self.logger.debug("Beam-Backfill: waiting for new state root")
continue
msg = "total=%d" % self._total_added_nodes
msg += " new=%d" % self._num_added
msg += " miss=%d" % self._num_missed
self.logger.debug("Beam-Backfill: %s", msg)
# log peer counts
show_top_n_peers = 3
self.logger.debug(
"Beam-Backfill-Peer-Usage-Top-%d: %s",
show_top_n_peers,
self._num_requests_by_peer.most_common(show_top_n_peers),
)
# For now, report every 30s (1/3 as often as the debug report above)
if step % 3 == 0:
num_storage_trackers = len(self._storage_trackers)
if num_storage_trackers:
active_storage_completion = sum(
self._complete_trie_fraction(store_tracker)
for store_tracker in self._storage_trackers.values()
) / num_storage_trackers
else:
active_storage_completion = 0
# Log backfill state stats as a progress indicator to the user:
# - nodes: the total number of nodes collected during this backfill session
# - accts: number of accounts completed, including all storage and bytecode,
# if present. This includes accounts downloaded and ones already present.
# - prog: the progress to completion, measured as a percentage of accounts
# completed, using trie structure. Ignores imbalances caused by storage.
# - stores: number of non-trivial complete storages downloaded
# - storing: the percentage complete and number of storage tries being
# downloaded actively
# - walked: the part of the account trie walked from this
# epoch's index, as parts per million (a fraction of the
# total account trie)
# - tnps: trie nodes collected per second, since the last debug log (in the
# last 10 seconds, at comment time)
num_requests = sum(self._num_requests_by_peer.values())
if num_requests == 0:
log = self.logger.debug
else:
log = self.logger.info
log(
(
"State Stats: nodes=%d accts=%d prog=%.2f%% stores=%d"
" storing=%.1f%% of %d walked=%.1fppm tnps=%.0f req=%d"
),
self._total_added_nodes,
self._num_accounts_completed,
self._complete_trie_fraction(self._account_tracker) * 100,
self._num_storage_completed,
active_storage_completion * 100,
num_storage_trackers,
self._contiguous_accounts_complete_fraction() * 1e6,
self._num_added / timer.elapsed,
num_requests,
)
self._num_requests_by_peer.clear()
def _complete_trie_fraction(self, tracker: TrieNodeRequestTracker) -> float:
"""
Calculate stats for logging: estimate what percent of the trie is completed,
by looking at unexplored prefixes in the account trie.
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion
One awkward thing: there will be no apparent progress while filling in
the storage of a single large account. Progress is slow enough anyway
that this is probably immaterial.
"""
# Move this logic into HexaryTrieFog someday
unknown_prefixes = tracker._trie_fog._unexplored_prefixes
# Basic estimation logic:
# - An unknown prefix 0xf means that we are missing 1/16 of the trie
# - An unknown prefix 0x12 means that we are missing 1/(16^2) of the trie
# - Add up all the unknown prefixes to estimate the total collected fraction.
unknown_fraction = sum(
(1 / 16) ** len(prefix)
for prefix in unknown_prefixes
)
return 1 - unknown_fraction
def _contiguous_accounts_complete_fraction(self) -> float:
"""
Estimate the completed fraction of the trie that is contiguous with
the current index (which rotates every 32 blocks)
It will be probably be quite noticeable that it will get "stuck" when
downloading a lot of storage, because we'll have to blow it up to more
than a percentage to see any significant change within 32 blocks. (when
the index will change again anyway)
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion contiguous with the current backfill index key
"""
starting_index = bytes_to_nibbles(self._next_trie_root_hash)
unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes
if len(unknown_prefixes) == 0:
return 1
# find the nearest unknown prefix (typically, on the right)
nearest_index = unknown_prefixes.bisect(starting_index)
# Get the nearest unknown prefix to the left
if nearest_index == 0:
left_prefix = (0, ) * 64
else:
left_prefix = unknown_prefixes[nearest_index - 1]
if key_starts_with(starting_index, left_prefix):
# The prefix of the starting index is unknown, so the index
# itself is unknown.
return 0
# Get the nearest unknown prefix to the right
if len(unknown_prefixes) == nearest_index:
right_prefix = (0xf, ) * 64
else:
right_prefix = unknown_prefixes[nearest_index]
# Use the space between the unknown prefixes to estimate the completed contiguous fraction
# At the base, every gap in the first nibble is a full 1/16th of the state complete
known_first_nibbles = right_prefix[0] - left_prefix[0] - 1
completed_fraction_base = (1 / 16) * known_first_nibbles
# Underneath, you can count completed subtrees on the right, each child 1/16 of the parent
right_side_completed = sum(
nibble * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(right_prefix[1:], 2)
)
# Do the same on the left
left_side_completed = sum(
(0xf - nibble) * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(left_prefix[1:], 2)
)
# Add up all completed areas
return left_side_completed + completed_fraction_base + right_side_completed
class TrieNodeRequestTracker:
def __init__(self) -> None:
self._trie_fog = fog.HexaryTrieFog()
self._active_prefixes: Set[Nibbles] = set()
# cache of nodes used to speed up trie walking
self._node_frontier_cache = fog.TrieFrontierCache()
def mark_for_review(self, prefix: Nibbles) -> None:
# Calling this does not mean that the nodes were returned, only that they are eligible again
# for review (either they were returned or we can ask a different peer for them)
self._active_prefixes.remove(prefix)
def pause_review(self, prefix: Nibbles) -> None:
"""
Stop iterating this node, until mark_for_review() is called
"""
self._active_prefixes.add(prefix)
def _get_eligible_fog(self) -> fog.HexaryTrieFog:
"""
Return the Trie Fog that can be searched, ignoring any nodes that are currently
being requested.
"""
return self._trie_fog.mark_all_complete(self._active_prefixes)
def next_path_to_explore(self, starting_index: Nibbles) -> Nibbles:
return self._get_eligible_fog().nearest_unknown(starting_index)
def confirm_prefix(
self,
confirmed_prefix: Nibbles,
node: fog.HexaryTrieFog) -> None:
if node.sub_segments:
# No nodes have both value and sub_segments, so we can wait to update the cache
self.add_cache(confirmed_prefix, node, node.sub_segments)
elif node.value:
# If we are confirming a leaf, use confirm_leaf(). We do not attempt to handle a
# situation where one key is a prefix of another key, and simply error out.
raise ValueError("Do not handle case where prefix of another key has a value")
else:
# We don't have to look up this node anymore, so can delete it from our cache
self.delete_cache(confirmed_prefix)
self._trie_fog = self._trie_fog.explore(confirmed_prefix, node.sub_segments)
def confirm_leaf(self, path_to_leaf: Nibbles) -> None:
# We don't handle keys that are subkeys of other keys (because
# all keys are 32 bytes), so we can just hard-code that there
# are no children of this address.
self.delete_cache(path_to_leaf)
self._trie_fog = self._trie_fog.explore(path_to_leaf, ())
def generate_request(
self,
node_hash: Hash32,
prefix: Nibbles) -> TrackedRequest:
self.pause_review(prefix)
return TrackedRequest(self, node_hash, prefix)
@property
def has_active_requests(self) -> bool:
return len(self._active_prefixes) > 0
def get_cached_parent(self, prefix: Nibbles) -> Tuple[HexaryTrieNode, Nibbles]:
return self._node_frontier_cache.get(prefix)
def add_cache(
self,
prefix: Nibbles,
node: HexaryTrieNode,
sub_segments: Iterable[Nibbles]) -> None:
self._node_frontier_cache.add(prefix, node, sub_segments)
def delete_cache(self, prefix: Nibbles) -> None:
self._node_frontier_cache.delete(prefix)
@property
def is_complete(self) -> bool:
return self._trie_fog.is_complete
def __repr__(self) -> str:
return (
f"TrieNodeRequestTracker(trie_fog={self._trie_fog!r},"
f" active_prefixes={self._active_prefixes!r})"
)
class TrackedRequest(NamedTuple):
tracker: TrieNodeRequestTracker
node_hash: Hash32
prefix: Nibbles | #
# In response to these situations, we might like to:
# 1. Debug log? | random_line_split |
backfill.py | from __future__ import annotations
import asyncio
from collections import Counter
from functools import partial
import itertools
import typing
from typing import (
AsyncIterator,
Dict,
Iterable,
NamedTuple,
Optional,
Set,
Tuple,
)
from async_service import Service
from eth.abc import (
AtomicDatabaseAPI,
BlockHeaderAPI,
)
from eth.constants import EMPTY_SHA3
from eth.rlp.accounts import Account
from eth_typing import Hash32
import rlp
from trie import (
HexaryTrie,
exceptions as trie_exceptions,
fog,
)
from trie.constants import (
BLANK_NODE_HASH,
)
from trie.utils.nibbles import (
bytes_to_nibbles,
)
from trie.utils.nodes import (
key_starts_with,
)
from trie.typing import (
HexaryTrieNode,
Nibbles,
)
from p2p.exceptions import BaseP2PError, PeerConnectionLost
from trinity.protocol.eth.peer import ETHPeer, ETHPeerPool
from trinity.sync.beam.constants import (
EPOCH_BLOCK_LENGTH,
GAP_BETWEEN_TESTS,
NON_IDEAL_RESPONSE_PENALTY,
PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED,
)
from trinity._utils.async_iter import async_take
from trinity._utils.logging import get_logger
from trinity._utils.timer import Timer
from .queen import (
QueeningQueue,
QueenTrackerAPI,
)
REQUEST_SIZE = 16
class BeamStateBackfill(Service, QueenTrackerAPI):
"""
Use a very simple strategy to fill in state in the background.
Ask each peer in sequence for some nodes, ignoring the lowest RTT node.
Reduce memory pressure by using a depth-first strategy.
An intended side-effect is to build & maintain an accurate measurement of
the round-trip-time that peers take to respond to GetNodeData commands.
"""
_total_added_nodes = 0
_num_added = 0
_num_missed = 0
_num_accounts_completed = 0
_num_storage_completed = 0
_report_interval = 10
_num_requests_by_peer: typing.Counter[ETHPeer]
def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool) -> None:
self.logger = get_logger('trinity.sync.beam.backfill.BeamStateBackfill')
self._db = db
self._peer_pool = peer_pool
self._is_missing: Set[Hash32] = set()
self._num_requests_by_peer = Counter()
self._queening_queue = QueeningQueue(peer_pool)
# Track the nodes that we are requesting in the account trie
self._account_tracker = TrieNodeRequestTracker()
self._storage_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
self._bytecode_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
# The most recent root hash to use to navigate the trie
self._next_trie_root_hash: Optional[Hash32] = None
self._begin_backfill = asyncio.Event()
async def get_queen_peer(self) -> ETHPeer:
return await self._queening_queue.get_queen_peer()
def penalize_queen(self, peer: ETHPeer, delay: float = NON_IDEAL_RESPONSE_PENALTY) -> None:
self._queening_queue.penalize_queen(peer, delay=delay)
async def run(self) -> None:
self.manager.run_daemon_task(self._periodically_report_progress)
queening_manager = self.manager.run_daemon_child_service(self._queening_queue)
await queening_manager.wait_started()
await self._run_backfill()
self.manager.cancel()
async def _run_backfill(self) -> None:
await self._begin_backfill.wait()
if self._next_trie_root_hash is None:
raise RuntimeError("Cannot start backfill when a recent trie root hash is unknown")
while self.manager.is_running:
peer = await self._queening_queue.pop_fastest_peasant()
# collect node hashes that might be missing
required_data = tuple([
request async for request in async_take(REQUEST_SIZE, self._missing_trie_hashes())
])
if len(required_data) == 0:
# Nothing available to request, for one of two reasons:
if self._check_complete():
self.logger.info("Downloaded all accounts, storage and bytecode state")
return
else:
# There are active requests to peers, and we don't have enough information to
# ask for any more trie nodes (for example, near the beginning, when the top
# of the trie isn't available).
self._queening_queue.readd_peasant(peer)
self.logger.debug(
"Backfill is waiting for more hashes to arrive, putting %s back in queue",
peer,
)
await asyncio.sleep(PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED)
continue
self.manager.run_task(self._make_request, peer, required_data)
def _check_complete(self) -> bool:
if self._account_tracker.is_complete:
storage_complete = all(
storage_tracker.is_complete
for storage_tracker in self._storage_trackers.values()
)
if storage_complete:
bytecode_complete = all(
bytecode_tracker.is_complete
for bytecode_tracker in self._bytecode_trackers.values()
)
# All backfill is complete only if the account and storage and bytecodes are present
return bytecode_complete
else:
# At least one account is missing a storage trie node
return False
else:
# At least one account trie node is missing
return False
async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:
"""
Walks through the full state trie, yielding one missing node hash/prefix
at a time.
The yielded node info is wrapped in a TrackedRequest. The hash is
marked as active until it is explicitly marked for review again. The
hash/prefix will be marked for review asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
# For each account, when we have asked for all known storage and bytecode
# hashes, but some are still not present, we "pause" the account so we can look
# for neighboring nodes.
# This is a list of paused accounts, using the path to the leaf node,
# because that's how the account tracker is indexed.
exhausted_account_leaves: Tuple[Nibbles, ...] = ()
starting_root_hash = self._next_trie_root_hash
try:
while self.manager.is_running:
# Get the next account
# We have to rebuild the account iterator every time because...
# something about an exception during a manual __anext__()?
account_iterator = self._request_tracking_trie_items(
self._account_tracker,
starting_root_hash,
)
try:
next_account_info = await account_iterator.__anext__()
except trie_exceptions.MissingTraversalNode as exc:
# Found a missing trie node while looking for the next account
yield self._account_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
continue
except StopAsyncIteration:
# Finished iterating over all available accounts
break
# Decode account
path_to_leaf, address_hash_nibbles, encoded_account = next_account_info
account = rlp.decode(encoded_account, sedes=Account)
# Iterate over all missing hashes of subcomponents (storage & bytecode)
subcomponent_hashes_iterator = self._missing_subcomponent_hashes(
address_hash_nibbles,
account,
starting_root_hash,
)
async for node_request in subcomponent_hashes_iterator:
yield node_request
# Check if account is fully downloaded
account_components_complete = self._are_account_components_complete(
address_hash_nibbles,
account,
)
if account_components_complete:
# Mark fully downloaded accounts as complete, and do some cleanup
self._mark_account_complete(path_to_leaf, address_hash_nibbles)
else:
# Pause accounts that are not fully downloaded, and track the account
# to resume when the generator exits.
self._account_tracker.pause_review(path_to_leaf)
exhausted_account_leaves += (path_to_leaf, )
except GeneratorExit:
# As the generator is exiting, we want to resume any paused accounts. This
# allows us to find missing storage/bytecode on the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
raise
else:
# If we pause a few accounts and then run out of nodes to ask for, then we
# still need to resume the paused accounts to prepare for the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
# Possible scenarios:
# 1. We have completed backfill
# 2. We have iterated the available nodes, and all known hashes are being requested.
# For example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes, and exit cleanly.
#
# In response to these situations, we might like to:
# 1. Log and celebrate that the full state has been downloaded
# 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it, using a _check_complete() check.
return
async def _request_tracking_trie_items(
self,
request_tracker: TrieNodeRequestTracker,
root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:
"""
Walk through the supplied trie, yielding the request tracker and node
request for any missing trie nodes.
:yield: path to leaf node, a key (as nibbles), and the value found in the trie
:raise: MissingTraversalNode if a node is missing while walking the trie
"""
if self._next_trie_root_hash is None:
# We haven't started beam syncing, so don't know which root to start at
return
trie = HexaryTrie(self._db, root_hash)
starting_index = bytes_to_nibbles(root_hash)
while self.manager.is_running:
try:
path_to_node = request_tracker.next_path_to_explore(starting_index)
except trie_exceptions.PerfectVisibility:
# This doesn't necessarily mean we are finished.
# Any active prefixes might still be hiding some significant portion of the trie
# But it's all we're able to explore for now, until more node data arrives
return
try:
cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)
except KeyError:
cached_node = None
node_getter = partial(trie.traverse, path_to_node)
else:
node_getter = partial(trie.traverse_from, cached_node, uncached_key)
try:
node = node_getter()
except trie_exceptions.MissingTraversalNode as exc:
# Found missing account trie node
if path_to_node == exc.nibbles_traversed:
raise
elif cached_node is None:
# The path and nibbles traversed should always match in a non-cached traversal
raise RuntimeError(
f"Unexpected: on a non-cached traversal to {path_to_node}, the"
f" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}"
) from exc
else:
# We need to re-raise a version of the exception that includes the whole path
# from the root node (when using cached nodes, we only have the path from
# the parent node to the child node)
# We could always raise this re-wrapped version, but skipping it (probably?)
# improves performance.
missing_hash = exc.missing_node_hash
raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc
except trie_exceptions.TraversedPartialPath as exc:
node = exc.simulated_node
if node.value:
full_key_nibbles = path_to_node + node.suffix
if len(node.sub_segments):
# It shouldn't be a problem to skip handling this case, because all keys are
# hashed 32 bytes.
raise NotImplementedError(
"The state backfiller doesn't handle keys of different lengths, where"
f" one key is a prefix of another. But found {node} in trie with"
f" {root_hash!r}"
)
yield path_to_node, full_key_nibbles, node.value
# Note that we do not mark value nodes as completed. It is up to the caller
# to do that when it is ready. For example, the storage iterator will
# immediately treat the key as completed. The account iterator will
# not treat the key as completed until all of its storage and bytecode
# are also marked as complete.
else:
# If this is just an intermediate node, then we can mark it as confirmed.
request_tracker.confirm_prefix(path_to_node, node)
async def | (
self,
address_hash_nibbles: Nibbles,
account: Account,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
storage_node_iterator = self._missing_storage_hashes(
address_hash_nibbles,
account.storage_root,
starting_main_root,
)
async for node_request in storage_node_iterator:
yield node_request
bytecode_node_iterator = self._missing_bytecode_hashes(
address_hash_nibbles,
account.code_hash,
starting_main_root,
)
async for node_request in bytecode_node_iterator:
yield node_request
# Note that completing this iterator does NOT mean we're done with the
# account. It just means that all known missing hashes are actively
# being requested.
async def _missing_storage_hashes(
self,
address_hash_nibbles: Nibbles,
storage_root: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Walks through the storage trie at the given root, yielding one missing
storage node hash/prefix at a time.
The yielded node info is wrapped in a ``TrackedRequest``. The hash is
marked as active until it is explicitly marked for review again. The
hash/prefix will be marked for review asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
if storage_root == BLANK_NODE_HASH:
# Nothing to do if the storage has an empty root
return
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
while self.manager.is_running:
storage_iterator = self._request_tracking_trie_items(
storage_tracker,
storage_root,
)
try:
async for path_to_leaf, hashed_key, _storage_value in storage_iterator:
# We don't actually care to look at the storage keys/values during backfill
storage_tracker.confirm_leaf(path_to_leaf)
except trie_exceptions.MissingTraversalNode as exc:
yield storage_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
else:
# Possible scenarios:
# 1. We have completed backfilling this account's storage
# 2. We have iterated the available nodes, and only their children are missing,
# for example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes.
#
# In response to these situations, we might like to:
# 1. Debug log?
# 2. Look for more missing nodes in neighboring accounts and their storage, etc.
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it.
return
async def _missing_bytecode_hashes(
self,
address_hash_nibbles: Nibbles,
code_hash: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Checks if this bytecode is missing. If so, yield it and then exit.
If not, then exit immediately.
This may seem like overkill, and it is right now. But...
Code merkelization is coming (theoretically), and the other account
and storage trie iterators work similarly to this, so in some ways
it's easier to do this "over-generalized" solution now. It makes
request tracking a bit easier too, to have the same TrackedRequest
result mechanism.
"""
if code_hash == EMPTY_SHA3:
# Nothing to do if the bytecode is for the empty hash
return
bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
if bytecode_tracker.is_complete:
# All bytecode has been collected
return
# If there is an active request (for now, there can only be one), then skip
# any database checks until the active request is resolved.
if not bytecode_tracker.has_active_requests:
if code_hash not in self._db:
# The bytecode isn't present, so we ask for it.
# A bit hacky here, since there is no trie, we just treat it as
# if it were a leaf node at the root.
yield bytecode_tracker.generate_request(code_hash, prefix=())
else:
# The bytecode is already present, but the tracker isn't marked
# as completed yet, so finish it off.
bytecode_tracker.confirm_leaf(path_to_leaf=())
def _get_storage_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._storage_trackers:
return self._storage_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._storage_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _get_bytecode_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._bytecode_trackers:
return self._bytecode_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._bytecode_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _mark_account_complete(self, path_to_leaf: Nibbles, address_hash_nibbles: Nibbles) -> None:
self._account_tracker.confirm_leaf(path_to_leaf)
self._num_accounts_completed += 1
# Clear the storage tracker, to reduce memory usage
# and the time to check self._check_complete()
if address_hash_nibbles in self._storage_trackers:
self._num_storage_completed += 1
del self._storage_trackers[address_hash_nibbles]
# Clear the bytecode tracker, for the same reason
if address_hash_nibbles in self._bytecode_trackers:
del self._bytecode_trackers[address_hash_nibbles]
def _are_account_components_complete(
        self,
        address_hash_nibbles: Nibbles,
        account: Account) -> bool:
    """
    Return True when this account's storage trie (if any) and bytecode
    (if any) have both been fully downloaded.
    """
    if account.storage_root != BLANK_NODE_HASH:
        # Only generate a storage tracker for accounts that actually have storage
        if not self._get_storage_tracker(address_hash_nibbles).is_complete:
            # Missing some storage
            return False
    if account.code_hash == EMPTY_SHA3:
        # All storage is downloaded, and there is no bytecode to download
        return True
    # All storage is downloaded; complete only if the bytecode is downloaded too
    return self._get_bytecode_tracker(address_hash_nibbles).is_complete
async def _make_request(
        self,
        peer: ETHPeer,
        request_data: Iterable[TrackedRequest]) -> None:
    # Ask a single peer for every distinct node hash in this batch, then store
    # whatever was returned. Each request's prefix stays "active" until the
    # finally block below marks it for review again.
    self._num_requests_by_peer[peer] += 1
    request_hashes = tuple(set(request.node_hash for request in request_data))
    try:
        nodes = await peer.eth_api.get_node_data(request_hashes)
    except asyncio.TimeoutError:
        # Slow peer: re-queue it with a doubled cool-down before its next turn
        self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
    except PeerConnectionLost:
        # Something unhappy, but we don't really care, peer will be gone by next loop
        pass
    except (BaseP2PError, Exception) as exc:
        # NOTE(review): this clause catches *all* remaining exceptions, not only
        # p2p errors -- presumably intentional to keep the backfill loop alive;
        # confirm before narrowing.
        self.logger.info("Unexpected err while getting background nodes from %s: %s", peer, exc)
        self.logger.debug("Problem downloading background nodes from peer...", exc_info=True)
        self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
    else:
        # Success: re-queue the peer with the normal gap, then persist the nodes
        self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS)
        self._insert_results(request_hashes, nodes)
    finally:
        # Whether or not data arrived, the prefixes become eligible again
        for request in request_data:
            request.tracker.mark_for_review(request.prefix)
def _insert_results(
self,
requested_hashes: Tuple[Hash32, ...],
nodes: Tuple[Tuple[Hash32, bytes], ...]) -> None:
returned_nodes = dict(nodes)
with self._db.atomic_batch() as write_batch:
for requested_hash in requested_hashes:
if requested_hash in returned_nodes:
self._num_added += 1
self._total_added_nodes += 1
encoded_node = returned_nodes[requested_hash]
write_batch[requested_hash] = encoded_node
else:
self._num_missed += 1
def set_root_hash(self, header: "BlockHeaderAPI", root_hash: "Hash32") -> None:
    """
    Record a new state root to navigate the account trie from.
    The first root observed also unblocks the backfill loop.
    """
    if self._next_trie_root_hash is None:
        self._next_trie_root_hash = root_hash
        self._begin_backfill.set()
        return
    # This is the root hash of the *parent* of the header, so rotate the
    # root only when block_number % EPOCH_BLOCK_LENGTH == 1.
    if header.block_number % EPOCH_BLOCK_LENGTH == 1:
        self._next_trie_root_hash = root_hash
async def _periodically_report_progress(self) -> None:
    # Daemon task: emit a short debug stats line every ``_report_interval``
    # seconds, plus a fuller user-facing progress line every third iteration.
    for step in itertools.count():
        if not self.manager.is_running:
            break
        # Reset the per-interval counters (the session total is kept separately)
        self._num_added = 0
        self._num_missed = 0
        timer = Timer()
        await asyncio.sleep(self._report_interval)
        if not self._begin_backfill.is_set():
            self.logger.debug("Beam-Backfill: waiting for new state root")
            continue
        msg = "total=%d" % self._total_added_nodes
        msg += " new=%d" % self._num_added
        msg += " miss=%d" % self._num_missed
        self.logger.debug("Beam-Backfill: %s", msg)
        # log peer counts
        show_top_n_peers = 3
        self.logger.debug(
            "Beam-Backfill-Peer-Usage-Top-%d: %s",
            show_top_n_peers,
            self._num_requests_by_peer.most_common(show_top_n_peers),
        )
        # For now, report every 30s (1/3 as often as the debug report above)
        if step % 3 == 0:
            num_storage_trackers = len(self._storage_trackers)
            if num_storage_trackers:
                # Mean completion fraction across all actively-tracked storage tries
                active_storage_completion = sum(
                    self._complete_trie_fraction(store_tracker)
                    for store_tracker in self._storage_trackers.values()
                ) / num_storage_trackers
            else:
                active_storage_completion = 0
            # Log backfill state stats as a progress indicator to the user:
            # - nodes: the total number of nodes collected during this backfill session
            # - accts: number of accounts completed, including all storage and bytecode,
            #   if present. This includes accounts downloaded and ones already present.
            # - prog: the progress to completion, measured as a percentage of accounts
            #   completed, using trie structure. Ignores imbalances caused by storage.
            # - stores: number of non-trivial complete storages downloaded
            # - storing: the percentage complete and number of storage tries being
            #   downloaded actively
            # - walked: the part of the account trie walked from this
            #   epoch's index, as parts per million (a fraction of the
            #   total account trie)
            # - tnps: trie nodes collected per second, since the last debug log (in the
            #   last 10 seconds, at comment time)
            num_requests = sum(self._num_requests_by_peer.values())
            if num_requests == 0:
                # Nothing happened this window; demote the report to debug level
                log = self.logger.debug
            else:
                log = self.logger.info
            log(
                (
                    "State Stats: nodes=%d accts=%d prog=%.2f%% stores=%d"
                    " storing=%.1f%% of %d walked=%.1fppm tnps=%.0f req=%d"
                ),
                self._total_added_nodes,
                self._num_accounts_completed,
                self._complete_trie_fraction(self._account_tracker) * 100,
                self._num_storage_completed,
                active_storage_completion * 100,
                num_storage_trackers,
                self._contiguous_accounts_complete_fraction() * 1e6,
                # timer.elapsed >= _report_interval here (sleep above), so no /0
                self._num_added / timer.elapsed,
                num_requests,
            )
            self._num_requests_by_peer.clear()
def _complete_trie_fraction(self, tracker: TrieNodeRequestTracker) -> float:
"""
Calculate stats for logging: estimate what percent of the trie is completed,
by looking at unexplored prefixes in the account trie.
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion
One awkward thing: there will be no apparent progress while filling in
the storage of a single large account. Progress is slow enough anyway
that this is probably immaterial.
"""
# Move this logic into HexaryTrieFog someday
unknown_prefixes = tracker._trie_fog._unexplored_prefixes
# Basic estimation logic:
# - An unknown prefix 0xf means that we are missing 1/16 of the trie
# - An unknown prefix 0x12 means that we are missing 1/(16^2) of the trie
# - Add up all the unknown prefixes to estimate the total collected fraction.
unknown_fraction = sum(
(1 / 16) ** len(prefix)
for prefix in unknown_prefixes
)
return 1 - unknown_fraction
def _contiguous_accounts_complete_fraction(self) -> float:
    """
    Estimate the completed fraction of the trie that is contiguous with
    the current index (which rotates every 32 blocks)
    It will be probably be quite noticeable that it will get "stuck" when
    downloading a lot of storage, because we'll have to blow it up to more
    than a percentage to see any significant change within 32 blocks. (when
    the index will change again anyway)
    :return: a number in the range [0, 1] (+/- rounding error) estimating
        trie completion contiguous with the current backfill index key
    """
    # NOTE(review): assumes _next_trie_root_hash is not None here -- callers
    # appear to invoke this only after backfill has begun; confirm.
    starting_index = bytes_to_nibbles(self._next_trie_root_hash)
    # unknown_prefixes supports .bisect() and indexing, i.e. a sorted
    # container of nibble tuples (HexaryTrieFog internals)
    unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes
    if len(unknown_prefixes) == 0:
        # Everything explored: fully complete
        return 1
    # find the nearest unknown prefix (typically, on the right)
    nearest_index = unknown_prefixes.bisect(starting_index)
    # Get the nearest unknown prefix to the left
    if nearest_index == 0:
        # Nothing unknown to the left: use the minimal 64-nibble key as sentinel
        left_prefix = (0, ) * 64
    else:
        left_prefix = unknown_prefixes[nearest_index - 1]
    if key_starts_with(starting_index, left_prefix):
        # The prefix of the starting index is unknown, so the index
        # itself is unknown.
        return 0
    # Get the nearest unknown prefix to the right
    if len(unknown_prefixes) == nearest_index:
        # Nothing unknown to the right: use the maximal 64-nibble key as sentinel
        right_prefix = (0xf, ) * 64
    else:
        right_prefix = unknown_prefixes[nearest_index]
    # Use the space between the unknown prefixes to estimate the completed contiguous fraction
    # At the base, every gap in the first nibble is a full 1/16th of the state complete
    known_first_nibbles = right_prefix[0] - left_prefix[0] - 1
    completed_fraction_base = (1 / 16) * known_first_nibbles
    # Underneath, you can count completed subtrees on the right, each child 1/16 of the parent
    right_side_completed = sum(
        nibble * (1 / 16) ** nibble_depth
        for nibble_depth, nibble
        in enumerate(right_prefix[1:], 2)
    )
    # Do the same on the left
    left_side_completed = sum(
        (0xf - nibble) * (1 / 16) ** nibble_depth
        for nibble_depth, nibble
        in enumerate(left_prefix[1:], 2)
    )
    # Add up all completed areas
    return left_side_completed + completed_fraction_base + right_side_completed
class TrieNodeRequestTracker:
    """
    Track the unexplored parts of a single trie (via a HexaryTrieFog) plus
    the set of prefixes that are currently out on request to peers.
    """
    def __init__(self) -> None:
        self._trie_fog = fog.HexaryTrieFog()
        # Prefixes currently being requested from a peer; they are hidden
        # from exploration until mark_for_review() is called.
        self._active_prefixes: Set[Nibbles] = set()
        # cache of nodes used to speed up trie walking
        self._node_frontier_cache = fog.TrieFrontierCache()

    def mark_for_review(self, prefix: Nibbles) -> None:
        # Calling this does not mean that the nodes were returned, only that they are eligible again
        # for review (either they were returned or we can ask a different peer for them)
        # Note: set.remove() raises KeyError if the prefix was not active.
        self._active_prefixes.remove(prefix)

    def pause_review(self, prefix: Nibbles) -> None:
        """
        Stop iterating this node, until mark_for_review() is called
        """
        self._active_prefixes.add(prefix)

    def _get_eligible_fog(self) -> fog.HexaryTrieFog:
        """
        Return the Trie Fog that can be searched, ignoring any nodes that are currently
        being requested.
        """
        return self._trie_fog.mark_all_complete(self._active_prefixes)

    def next_path_to_explore(self, starting_index: Nibbles) -> Nibbles:
        # Nearest unexplored, non-active prefix to the supplied index
        return self._get_eligible_fog().nearest_unknown(starting_index)

    def confirm_prefix(
            self,
            confirmed_prefix: Nibbles,
            node: fog.HexaryTrieFog) -> None:
        # NOTE(review): ``node`` is annotated as HexaryTrieFog but is used like
        # a trie node (.sub_segments / .value) -- annotation looks wrong; confirm.
        if node.sub_segments:
            # No nodes have both value and sub_segments, so we can wait to update the cache
            self.add_cache(confirmed_prefix, node, node.sub_segments)
        elif node.value:
            # If we are confirming a leaf, use confirm_leaf(). We do not attempt to handle a
            # situation where one key is a prefix of another key, and simply error out.
            raise ValueError("Do not handle case where prefix of another key has a value")
        else:
            # We don't have to look up this node anymore, so can delete it from our cache
            self.delete_cache(confirmed_prefix)
        self._trie_fog = self._trie_fog.explore(confirmed_prefix, node.sub_segments)

    def confirm_leaf(self, path_to_leaf: Nibbles) -> None:
        # We don't handle keys that are subkeys of other keys (because
        # all keys are 32 bytes), so we can just hard-code that there
        # are no children of this address.
        self.delete_cache(path_to_leaf)
        self._trie_fog = self._trie_fog.explore(path_to_leaf, ())

    def generate_request(
            self,
            node_hash: Hash32,
            prefix: Nibbles) -> TrackedRequest:
        # The prefix stays active until mark_for_review() is called for it
        self.pause_review(prefix)
        return TrackedRequest(self, node_hash, prefix)

    @property
    def has_active_requests(self) -> bool:
        return len(self._active_prefixes) > 0

    def get_cached_parent(self, prefix: Nibbles) -> Tuple[HexaryTrieNode, Nibbles]:
        # Raises KeyError when there is no cached parent for this prefix
        # (callers catch KeyError and fall back to a root traversal)
        return self._node_frontier_cache.get(prefix)

    def add_cache(
            self,
            prefix: Nibbles,
            node: HexaryTrieNode,
            sub_segments: Iterable[Nibbles]) -> None:
        self._node_frontier_cache.add(prefix, node, sub_segments)

    def delete_cache(self, prefix: Nibbles) -> None:
        self._node_frontier_cache.delete(prefix)

    @property
    def is_complete(self) -> bool:
        # True when the fog has no unexplored prefixes left
        return self._trie_fog.is_complete

    def __repr__(self) -> str:
        return (
            f"TrieNodeRequestTracker(trie_fog={self._trie_fog!r},"
            f" active_prefixes={self._active_prefixes!r})"
        )
class TrackedRequest(NamedTuple):
    """One outstanding request for a single trie node (or bytecode blob)."""
    # The tracker that issued this request; it keeps ``prefix`` active
    # until mark_for_review() is called.
    tracker: TrieNodeRequestTracker
    node_hash: Hash32
    prefix: Nibbles
| _missing_subcomponent_hashes | identifier_name |
backfill.py | from __future__ import annotations
import asyncio
from collections import Counter
from functools import partial
import itertools
import typing
from typing import (
AsyncIterator,
Dict,
Iterable,
NamedTuple,
Optional,
Set,
Tuple,
)
from async_service import Service
from eth.abc import (
AtomicDatabaseAPI,
BlockHeaderAPI,
)
from eth.constants import EMPTY_SHA3
from eth.rlp.accounts import Account
from eth_typing import Hash32
import rlp
from trie import (
HexaryTrie,
exceptions as trie_exceptions,
fog,
)
from trie.constants import (
BLANK_NODE_HASH,
)
from trie.utils.nibbles import (
bytes_to_nibbles,
)
from trie.utils.nodes import (
key_starts_with,
)
from trie.typing import (
HexaryTrieNode,
Nibbles,
)
from p2p.exceptions import BaseP2PError, PeerConnectionLost
from trinity.protocol.eth.peer import ETHPeer, ETHPeerPool
from trinity.sync.beam.constants import (
EPOCH_BLOCK_LENGTH,
GAP_BETWEEN_TESTS,
NON_IDEAL_RESPONSE_PENALTY,
PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED,
)
from trinity._utils.async_iter import async_take
from trinity._utils.logging import get_logger
from trinity._utils.timer import Timer
from .queen import (
QueeningQueue,
QueenTrackerAPI,
)
REQUEST_SIZE = 16
class BeamStateBackfill(Service, QueenTrackerAPI):
"""
Use a very simple strategy to fill in state in the background.
Ask each peer in sequence for some nodes, ignoring the lowest RTT node.
Reduce memory pressure by using a depth-first strategy.
An intended side-effect is to build & maintain an accurate measurement of
the round-trip-time that peers take to respond to GetNodeData commands.
"""
_total_added_nodes = 0
_num_added = 0
_num_missed = 0
_num_accounts_completed = 0
_num_storage_completed = 0
_report_interval = 10
_num_requests_by_peer: typing.Counter[ETHPeer]
def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool) -> None:
    # Backfill state into ``db`` using peers from ``peer_pool``.
    self.logger = get_logger('trinity.sync.beam.backfill.BeamStateBackfill')
    self._db = db
    self._peer_pool = peer_pool
    # NOTE(review): _is_missing appears unused in the visible code -- confirm
    self._is_missing: Set[Hash32] = set()
    # Per-report-window request counts, keyed by peer (cleared periodically)
    self._num_requests_by_peer = Counter()
    self._queening_queue = QueeningQueue(peer_pool)
    # Track the nodes that we are requesting in the account trie
    self._account_tracker = TrieNodeRequestTracker()
    # Per-account trackers, keyed by the nibbles of the address hash
    self._storage_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
    self._bytecode_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
    # The most recent root hash to use to navigate the trie
    self._next_trie_root_hash: Optional[Hash32] = None
    # Set by set_root_hash() when the first root arrives
    self._begin_backfill = asyncio.Event()
async def get_queen_peer(self) -> ETHPeer:
    # QueenTrackerAPI hook: delegate to the queening queue, which holds the
    # "queen" peer apart from the backfill peasants.
    return await self._queening_queue.get_queen_peer()
def penalize_queen(self, peer: ETHPeer, delay: float = NON_IDEAL_RESPONSE_PENALTY) -> None:
    # QueenTrackerAPI hook: demote/penalize a poorly-responding queen peer
    self._queening_queue.penalize_queen(peer, delay=delay)
async def run(self) -> None:
    # Progress reporting runs for the life of the service
    self.manager.run_daemon_task(self._periodically_report_progress)
    # The queening queue is a child service; wait until it is up before backfilling
    queening_manager = self.manager.run_daemon_child_service(self._queening_queue)
    await queening_manager.wait_started()
    await self._run_backfill()
    # Backfill returned, meaning the state is fully downloaded: shut down
    self.manager.cancel()
async def _run_backfill(self) -> None:
    # Block until set_root_hash() has provided a root to navigate from
    await self._begin_backfill.wait()
    if self._next_trie_root_hash is None:
        raise RuntimeError("Cannot start backfill when a recent trie root hash is unknown")
    while self.manager.is_running:
        # Only non-queen ("peasant") peers serve backfill requests
        peer = await self._queening_queue.pop_fastest_peasant()
        # collect node hashes that might be missing
        required_data = tuple([
            request async for request in async_take(REQUEST_SIZE, self._missing_trie_hashes())
        ])
        if len(required_data) == 0:
            # Nothing available to request, for one of two reasons:
            if self._check_complete():
                self.logger.info("Downloaded all accounts, storage and bytecode state")
                return
            else:
                # There are active requests to peers, and we don't have enough information to
                # ask for any more trie nodes (for example, near the beginning, when the top
                # of the trie isn't available).
                self._queening_queue.readd_peasant(peer)
                self.logger.debug(
                    "Backfill is waiting for more hashes to arrive, putting %s back in queue",
                    peer,
                )
                await asyncio.sleep(PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED)
                continue
        # Fire off the batched request without blocking this loop
        self.manager.run_task(self._make_request, peer, required_data)
def _check_complete(self) -> bool:
if self._account_tracker.is_complete:
storage_complete = all(
storage_tracker.is_complete
for storage_tracker in self._storage_trackers.values()
)
if storage_complete:
bytecode_complete = all(
bytecode_tracker.is_complete
for bytecode_tracker in self._bytecode_trackers.values()
)
# All backfill is complete only if the account and storage and bytecodes are present
return bytecode_complete
else:
# At least one account is missing a storage trie node
return False
else:
# At least one account trie node is missing
return False
async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:
    """
    Walks through the full state trie, yielding one missing node hash/prefix
    at a time.
    The yielded node info is wrapped in a TrackedRequest. The hash is
    marked as active until it is explicitly marked for review again. The
    hash/prefix will be marked for review asking a peer for the data.
    Will exit when all known node hashes are already actively being
    requested, or if there are no more missing nodes.
    """
    # For each account, when we have asked for all known storage and bytecode
    # hashes, but some are still not present, we "pause" the account so we can look
    # for neighboring nodes.
    # This is a list of paused accounts, using the path to the leaf node,
    # because that's how the account tracker is indexed.
    exhausted_account_leaves: Tuple[Nibbles, ...] = ()
    # Pin the root for this walk; set_root_hash() may rotate it concurrently.
    # (If still None here, _request_tracking_trie_items() exits immediately.)
    starting_root_hash = self._next_trie_root_hash
    try:
        while self.manager.is_running:
            # Get the next account
            # We have to rebuild the account iterator every time because...
            # something about an exception during a manual __anext__()?
            account_iterator = self._request_tracking_trie_items(
                self._account_tracker,
                starting_root_hash,
            )
            try:
                next_account_info = await account_iterator.__anext__()
            except trie_exceptions.MissingTraversalNode as exc:
                # Found a missing trie node while looking for the next account
                yield self._account_tracker.generate_request(
                    exc.missing_node_hash,
                    exc.nibbles_traversed,
                )
                continue
            except StopAsyncIteration:
                # Finished iterating over all available accounts
                break
            # Decode account
            path_to_leaf, address_hash_nibbles, encoded_account = next_account_info
            account = rlp.decode(encoded_account, sedes=Account)
            # Iterate over all missing hashes of subcomponents (storage & bytecode)
            subcomponent_hashes_iterator = self._missing_subcomponent_hashes(
                address_hash_nibbles,
                account,
                starting_root_hash,
            )
            async for node_request in subcomponent_hashes_iterator:
                yield node_request
            # Check if account is fully downloaded
            account_components_complete = self._are_account_components_complete(
                address_hash_nibbles,
                account,
            )
            if account_components_complete:
                # Mark fully downloaded accounts as complete, and do some cleanup
                self._mark_account_complete(path_to_leaf, address_hash_nibbles)
            else:
                # Pause accounts that are not fully downloaded, and track the account
                # to resume when the generator exits.
                self._account_tracker.pause_review(path_to_leaf)
                exhausted_account_leaves += (path_to_leaf, )
    except GeneratorExit:
        # As the generator is exiting, we want to resume any paused accounts. This
        # allows us to find missing storage/bytecode on the next iteration.
        for path_to_leaf in exhausted_account_leaves:
            self._account_tracker.mark_for_review(path_to_leaf)
        raise
    else:
        # If we pause a few accounts and then run out of nodes to ask for, then we
        # still need to resume the paused accounts to prepare for the next iteration.
        for path_to_leaf in exhausted_account_leaves:
            self._account_tracker.mark_for_review(path_to_leaf)
        # Possible scenarios:
        # 1. We have completed backfill
        # 2. We have iterated the available nodes, and all known hashes are being requested.
        #    For example: if 0 nodes are available, and we walk to the root and request
        #    the root from a peer, we do not have any available information to ask for
        #    more nodes, and exit cleanly.
        #
        # In response to these situations, we might like to:
        # 1. Log and celebrate that the full state has been downloaded
        # 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive
        #
        # 1 and 2 are a little more cleanly handled outside this iterator, so we just
        # exit and let the caller deal with it, using a _check_complete() check.
        return
async def _request_tracking_trie_items(
        self,
        request_tracker: TrieNodeRequestTracker,
        root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:
    """
    Walk through the supplied trie, yielding the request tracker and node
    request for any missing trie nodes.
    :yield: path to leaf node, a key (as nibbles), and the value found in the trie
    :raise: MissingTraversalNode if a node is missing while walking the trie
    """
    if self._next_trie_root_hash is None:
        # We haven't started beam syncing, so don't know which root to start at
        return
    trie = HexaryTrie(self._db, root_hash)
    # Start the walk at the index derived from the root hash, so each epoch
    # explores from a different point of the trie
    starting_index = bytes_to_nibbles(root_hash)
    while self.manager.is_running:
        try:
            path_to_node = request_tracker.next_path_to_explore(starting_index)
        except trie_exceptions.PerfectVisibility:
            # This doesn't necessarily mean we are finished.
            # Any active prefixes might still be hiding some significant portion of the trie
            # But it's all we're able to explore for now, until more node data arrives
            return
        try:
            cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)
        except KeyError:
            # No cached parent: traverse from the root of the trie
            cached_node = None
            node_getter = partial(trie.traverse, path_to_node)
        else:
            # Cached parent available: only traverse the remaining sub-path
            node_getter = partial(trie.traverse_from, cached_node, uncached_key)
        try:
            node = node_getter()
        except trie_exceptions.MissingTraversalNode as exc:
            # Found missing account trie node
            if path_to_node == exc.nibbles_traversed:
                raise
            elif cached_node is None:
                # The path and nibbles traversed should always match in a non-cached traversal
                raise RuntimeError(
                    f"Unexpected: on a non-cached traversal to {path_to_node}, the"
                    f" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}"
                ) from exc
            else:
                # We need to re-raise a version of the exception that includes the whole path
                # from the root node (when using cached nodes, we only have the path from
                # the parent node to the child node)
                # We could always raise this re-wrapped version, but skipping it (probably?)
                # improves performance.
                missing_hash = exc.missing_node_hash
                raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc
        except trie_exceptions.TraversedPartialPath as exc:
            # Landed partway down an extension/leaf: act on the simulated node
            node = exc.simulated_node
        if node.value:
            full_key_nibbles = path_to_node + node.suffix
            if len(node.sub_segments):
                # It shouldn't be a problem to skip handling this case, because all keys are
                # hashed 32 bytes.
                raise NotImplementedError(
                    "The state backfiller doesn't handle keys of different lengths, where"
                    f" one key is a prefix of another. But found {node} in trie with"
                    f" {root_hash!r}"
                )
            yield path_to_node, full_key_nibbles, node.value
            # Note that we do not mark value nodes as completed. It is up to the caller
            # to do that when it is ready. For example, the storage iterator will
            # immediately treat the key as completed. The account iterator will
            # not treat the key as completed until all of its storage and bytecode
            # are also marked as complete.
        else:
            # If this is just an intermediate node, then we can mark it as confirmed.
            request_tracker.confirm_prefix(path_to_node, node)
async def _missing_subcomponent_hashes(
        self,
        address_hash_nibbles: Nibbles,
        account: Account,
        starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
    """
    Yield every known-missing storage node for this account, followed by its
    missing bytecode (if any).

    Note that completing this iterator does NOT mean we're done with the
    account. It just means that all known missing hashes are actively
    being requested.
    """
    # Async generators are lazy, so building both up-front runs nothing yet;
    # the storage iterator is still fully drained before bytecode starts.
    sub_iterators = (
        self._missing_storage_hashes(
            address_hash_nibbles,
            account.storage_root,
            starting_main_root,
        ),
        self._missing_bytecode_hashes(
            address_hash_nibbles,
            account.code_hash,
            starting_main_root,
        ),
    )
    for node_request_iterator in sub_iterators:
        async for node_request in node_request_iterator:
            yield node_request
async def _missing_storage_hashes(
        self,
        address_hash_nibbles: Nibbles,
        storage_root: Hash32,
        starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
    """
    Walks through the storage trie at the given root, yielding one missing
    storage node hash/prefix at a time.
    The yielded node info is wrapped in a ``TrackedRequest``. The hash is
    marked as active until it is explicitly marked for review again. The
    hash/prefix will be marked for review asking a peer for the data.
    Will exit when all known node hashes are already actively being
    requested, or if there are no more missing nodes.
    """
    if storage_root == BLANK_NODE_HASH:
        # Nothing to do if the storage has an empty root
        return
    storage_tracker = self._get_storage_tracker(address_hash_nibbles)
    while self.manager.is_running:
        # Rebuild the walk each pass (mirrors the account iterator's approach)
        storage_iterator = self._request_tracking_trie_items(
            storage_tracker,
            storage_root,
        )
        try:
            async for path_to_leaf, hashed_key, _storage_value in storage_iterator:
                # We don't actually care to look at the storage keys/values during backfill
                storage_tracker.confirm_leaf(path_to_leaf)
        except trie_exceptions.MissingTraversalNode as exc:
            # A storage trie node is missing: hand it out as a tracked request
            yield storage_tracker.generate_request(
                exc.missing_node_hash,
                exc.nibbles_traversed,
            )
        else:
            # Possible scenarios:
            # 1. We have completed backfilling this account's storage
            # 2. We have iterated the available nodes, and only their children are missing,
            #    for example: if 0 nodes are available, and we walk to the root and request
            #    the root from a peer, we do not have any available information to ask for
            #    more nodes.
            #
            # In response to these situations, we might like to:
            # 1. Debug log?
            # 2. Look for more missing nodes in neighboring accounts and their storage, etc.
            #
            # 1 and 2 are a little more cleanly handled outside this iterator, so we just
            # exit and let the caller deal with it.
            return
async def _missing_bytecode_hashes(
        self,
        address_hash_nibbles: Nibbles,
        code_hash: Hash32,
        starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
    """
    Checks if this bytecode is missing. If so, yield it and then exit.
    If not, then exit immediately.
    This may seem like overkill, and it is right now. But...
    Code merkelization is coming (theoretically), and the other account
    and storage trie iterators work similarly to this, so in some ways
    it's easier to do this "over-generalized" solution now. It makes
    request tracking a bit easier too, to have the same TrackedRequest
    result mechanism.
    """
    if code_hash == EMPTY_SHA3:
        # Nothing to do if the bytecode is for the empty hash
        return
    bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
    if bytecode_tracker.is_complete:
        # All bytecode has been collected
        return
    # If there is an active request (for now, there can only be one), then skip
    # any database checks until the active request is resolved.
    if not bytecode_tracker.has_active_requests:
        if code_hash not in self._db:
            # The bytecode isn't present, so we ask for it.
            # A bit hacky here, since there is no trie, we just treat it as
            # if it were a leaf node at the root.
            yield bytecode_tracker.generate_request(code_hash, prefix=())
        else:
            # The bytecode is already present, but the tracker isn't marked
            # as completed yet, so finish it off.
            bytecode_tracker.confirm_leaf(path_to_leaf=())
def _get_storage_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
    # Return the storage-trie tracker for the given account (keyed by the
    # nibbles of the address hash), creating and registering one on first use.
    if address_hash_nibbles in self._storage_trackers:
        return self._storage_trackers[address_hash_nibbles]
    else:
        new_tracker = TrieNodeRequestTracker()
        self._storage_trackers[address_hash_nibbles] = new_tracker
        return new_tracker
def _get_bytecode_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
    # Return the bytecode tracker for the given account (keyed by the nibbles
    # of the address hash), creating and registering one on first use.
    if address_hash_nibbles in self._bytecode_trackers:
        return self._bytecode_trackers[address_hash_nibbles]
    else:
        new_tracker = TrieNodeRequestTracker()
        self._bytecode_trackers[address_hash_nibbles] = new_tracker
        return new_tracker
def _mark_account_complete(self, path_to_leaf: Nibbles, address_hash_nibbles: Nibbles) -> None:
    # Record one fully-downloaded account (storage and bytecode included)
    # and discard its per-account trackers.
    self._account_tracker.confirm_leaf(path_to_leaf)
    self._num_accounts_completed += 1
    # Clear the storage tracker, to reduce memory usage
    # and the time to check self._check_complete()
    if address_hash_nibbles in self._storage_trackers:
        # A present tracker means this account had a non-trivial storage trie
        self._num_storage_completed += 1
        del self._storage_trackers[address_hash_nibbles]
    # Clear the bytecode tracker, for the same reason
    if address_hash_nibbles in self._bytecode_trackers:
        del self._bytecode_trackers[address_hash_nibbles]
def _are_account_components_complete(
        self,
        address_hash_nibbles: Nibbles,
        account: Account) -> bool:
    # True only when this account's storage trie (if any) and bytecode
    # (if any) have both been fully downloaded.
    if account.storage_root != BLANK_NODE_HASH:
        # Avoid generating a storage tracker if there is no storage for this account
        storage_tracker = self._get_storage_tracker(address_hash_nibbles)
    if account.storage_root == BLANK_NODE_HASH or storage_tracker.is_complete:
        # Note: when storage_root is blank, ``storage_tracker`` is unbound,
        # but the ``or`` short-circuit guarantees it is never evaluated then.
        if account.code_hash == EMPTY_SHA3:
            # All storage is downloaded, and no bytecode to download
            return True
        else:
            bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
            # All storage is downloaded, return True only if bytecode is downloaded
            return bytecode_tracker.is_complete
    else:
        # Missing some storage
        return False
async def _make_request(
        self,
        peer: ETHPeer,
        request_data: Iterable[TrackedRequest]) -> None:
    # Ask one peer for every distinct node hash in this batch, then store
    # whatever came back. Each request's prefix stays "active" until the
    # finally block below marks it for review again.
    self._num_requests_by_peer[peer] += 1
    request_hashes = tuple(set(request.node_hash for request in request_data))
    try:
        nodes = await peer.eth_api.get_node_data(request_hashes)
    except asyncio.TimeoutError:
        # Slow peer: re-queue it with a doubled cool-down before its next turn
        self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
    except PeerConnectionLost:
        # Something unhappy, but we don't really care, peer will be gone by next loop
        pass
    except (BaseP2PError, Exception) as exc:
        # NOTE(review): this clause catches *all* remaining exceptions, not only
        # p2p errors -- presumably intentional to keep the backfill loop alive.
        self.logger.info("Unexpected err while getting background nodes from %s: %s", peer, exc)
        self.logger.debug("Problem downloading background nodes from peer...", exc_info=True)
        self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
    else:
        # Success: re-queue the peer with the normal gap, then persist the nodes
        self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS)
        self._insert_results(request_hashes, nodes)
    finally:
        # Whether or not data arrived, the prefixes become eligible again
        for request in request_data:
            request.tracker.mark_for_review(request.prefix)
def _insert_results(
        self,
        requested_hashes: Tuple[Hash32, ...],
        nodes: Tuple[Tuple[Hash32, bytes], ...]) -> None:
    # Persist the returned trie nodes in one atomic batch, updating the
    # per-interval hit/miss counters and the session total.
    returned_nodes = dict(nodes)
    with self._db.atomic_batch() as write_batch:
        for requested_hash in requested_hashes:
            if requested_hash in returned_nodes:
                self._num_added += 1
                self._total_added_nodes += 1
                encoded_node = returned_nodes[requested_hash]
                write_batch[requested_hash] = encoded_node
            else:
                # The peer did not return this hash
                self._num_missed += 1
def set_root_hash(self, header: BlockHeaderAPI, root_hash: Hash32) -> None:
    # Accept a fresh state root to navigate from. The first root observed
    # also unblocks the backfill loop (via the _begin_backfill event).
    if self._next_trie_root_hash is None:
        self._next_trie_root_hash = root_hash
        self._begin_backfill.set()
    elif header.block_number % EPOCH_BLOCK_LENGTH == 1:
        # This is the root hash of the *parent* of the header, so use modulus equals 1
        self._next_trie_root_hash = root_hash
async def _periodically_report_progress(self) -> None:
for step in itertools.count():
if not self.manager.is_running:
break
self._num_added = 0
self._num_missed = 0
timer = Timer()
await asyncio.sleep(self._report_interval)
if not self._begin_backfill.is_set():
self.logger.debug("Beam-Backfill: waiting for new state root")
continue
msg = "total=%d" % self._total_added_nodes
msg += " new=%d" % self._num_added
msg += " miss=%d" % self._num_missed
self.logger.debug("Beam-Backfill: %s", msg)
# log peer counts
show_top_n_peers = 3
self.logger.debug(
"Beam-Backfill-Peer-Usage-Top-%d: %s",
show_top_n_peers,
self._num_requests_by_peer.most_common(show_top_n_peers),
)
# For now, report every 30s (1/3 as often as the debug report above)
if step % 3 == 0:
num_storage_trackers = len(self._storage_trackers)
if num_storage_trackers:
|
else:
active_storage_completion = 0
# Log backfill state stats as a progress indicator to the user:
# - nodes: the total number of nodes collected during this backfill session
# - accts: number of accounts completed, including all storage and bytecode,
# if present. This includes accounts downloaded and ones already present.
# - prog: the progress to completion, measured as a percentage of accounts
# completed, using trie structure. Ignores imbalances caused by storage.
# - stores: number of non-trivial complete storages downloaded
# - storing: the percentage complete and number of storage tries being
# downloaded actively
# - walked: the part of the account trie walked from this
# epoch's index, as parts per million (a fraction of the
# total account trie)
# - tnps: trie nodes collected per second, since the last debug log (in the
# last 10 seconds, at comment time)
num_requests = sum(self._num_requests_by_peer.values())
if num_requests == 0:
log = self.logger.debug
else:
log = self.logger.info
log(
(
"State Stats: nodes=%d accts=%d prog=%.2f%% stores=%d"
" storing=%.1f%% of %d walked=%.1fppm tnps=%.0f req=%d"
),
self._total_added_nodes,
self._num_accounts_completed,
self._complete_trie_fraction(self._account_tracker) * 100,
self._num_storage_completed,
active_storage_completion * 100,
num_storage_trackers,
self._contiguous_accounts_complete_fraction() * 1e6,
self._num_added / timer.elapsed,
num_requests,
)
self._num_requests_by_peer.clear()
def _complete_trie_fraction(self, tracker: TrieNodeRequestTracker) -> float:
"""
Calculate stats for logging: estimate what percent of the trie is completed,
by looking at unexplored prefixes in the account trie.
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion
One awkward thing: there will be no apparent progress while filling in
the storage of a single large account. Progress is slow enough anyway
that this is probably immaterial.
"""
# Move this logic into HexaryTrieFog someday
unknown_prefixes = tracker._trie_fog._unexplored_prefixes
# Basic estimation logic:
# - An unknown prefix 0xf means that we are missing 1/16 of the trie
# - An unknown prefix 0x12 means that we are missing 1/(16^2) of the trie
# - Add up all the unknown prefixes to estimate the total collected fraction.
unknown_fraction = sum(
(1 / 16) ** len(prefix)
for prefix in unknown_prefixes
)
return 1 - unknown_fraction
def _contiguous_accounts_complete_fraction(self) -> float:
"""
Estimate the completed fraction of the trie that is contiguous with
the current index (which rotates every 32 blocks)
It will be probably be quite noticeable that it will get "stuck" when
downloading a lot of storage, because we'll have to blow it up to more
than a percentage to see any significant change within 32 blocks. (when
the index will change again anyway)
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion contiguous with the current backfill index key
"""
starting_index = bytes_to_nibbles(self._next_trie_root_hash)
unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes
if len(unknown_prefixes) == 0:
return 1
# find the nearest unknown prefix (typically, on the right)
nearest_index = unknown_prefixes.bisect(starting_index)
# Get the nearest unknown prefix to the left
if nearest_index == 0:
left_prefix = (0, ) * 64
else:
left_prefix = unknown_prefixes[nearest_index - 1]
if key_starts_with(starting_index, left_prefix):
# The prefix of the starting index is unknown, so the index
# itself is unknown.
return 0
# Get the nearest unknown prefix to the right
if len(unknown_prefixes) == nearest_index:
right_prefix = (0xf, ) * 64
else:
right_prefix = unknown_prefixes[nearest_index]
# Use the space between the unknown prefixes to estimate the completed contiguous fraction
# At the base, every gap in the first nibble is a full 1/16th of the state complete
known_first_nibbles = right_prefix[0] - left_prefix[0] - 1
completed_fraction_base = (1 / 16) * known_first_nibbles
# Underneath, you can count completed subtrees on the right, each child 1/16 of the parent
right_side_completed = sum(
nibble * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(right_prefix[1:], 2)
)
# Do the same on the left
left_side_completed = sum(
(0xf - nibble) * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(left_prefix[1:], 2)
)
# Add up all completed areas
return left_side_completed + completed_fraction_base + right_side_completed
class TrieNodeRequestTracker:
def __init__(self) -> None:
self._trie_fog = fog.HexaryTrieFog()
self._active_prefixes: Set[Nibbles] = set()
# cache of nodes used to speed up trie walking
self._node_frontier_cache = fog.TrieFrontierCache()
def mark_for_review(self, prefix: Nibbles) -> None:
# Calling this does not mean that the nodes were returned, only that they are eligible again
# for review (either they were returned or we can ask a different peer for them)
self._active_prefixes.remove(prefix)
def pause_review(self, prefix: Nibbles) -> None:
"""
Stop iterating this node, until mark_for_review() is called
"""
self._active_prefixes.add(prefix)
def _get_eligible_fog(self) -> fog.HexaryTrieFog:
"""
Return the Trie Fog that can be searched, ignoring any nodes that are currently
being requested.
"""
return self._trie_fog.mark_all_complete(self._active_prefixes)
def next_path_to_explore(self, starting_index: Nibbles) -> Nibbles:
return self._get_eligible_fog().nearest_unknown(starting_index)
def confirm_prefix(
self,
confirmed_prefix: Nibbles,
node: fog.HexaryTrieFog) -> None:
if node.sub_segments:
# No nodes have both value and sub_segments, so we can wait to update the cache
self.add_cache(confirmed_prefix, node, node.sub_segments)
elif node.value:
# If we are confirming a leaf, use confirm_leaf(). We do not attempt to handle a
# situation where one key is a prefix of another key, and simply error out.
raise ValueError("Do not handle case where prefix of another key has a value")
else:
# We don't have to look up this node anymore, so can delete it from our cache
self.delete_cache(confirmed_prefix)
self._trie_fog = self._trie_fog.explore(confirmed_prefix, node.sub_segments)
def confirm_leaf(self, path_to_leaf: Nibbles) -> None:
# We don't handle keys that are subkeys of other keys (because
# all keys are 32 bytes), so we can just hard-code that there
# are no children of this address.
self.delete_cache(path_to_leaf)
self._trie_fog = self._trie_fog.explore(path_to_leaf, ())
def generate_request(
self,
node_hash: Hash32,
prefix: Nibbles) -> TrackedRequest:
self.pause_review(prefix)
return TrackedRequest(self, node_hash, prefix)
@property
def has_active_requests(self) -> bool:
return len(self._active_prefixes) > 0
def get_cached_parent(self, prefix: Nibbles) -> Tuple[HexaryTrieNode, Nibbles]:
return self._node_frontier_cache.get(prefix)
def add_cache(
self,
prefix: Nibbles,
node: HexaryTrieNode,
sub_segments: Iterable[Nibbles]) -> None:
self._node_frontier_cache.add(prefix, node, sub_segments)
def delete_cache(self, prefix: Nibbles) -> None:
self._node_frontier_cache.delete(prefix)
@property
def is_complete(self) -> bool:
return self._trie_fog.is_complete
def __repr__(self) -> str:
return (
f"TrieNodeRequestTracker(trie_fog={self._trie_fog!r},"
f" active_prefixes={self._active_prefixes!r})"
)
class TrackedRequest(NamedTuple):
tracker: TrieNodeRequestTracker
node_hash: Hash32
prefix: Nibbles
| active_storage_completion = sum(
self._complete_trie_fraction(store_tracker)
for store_tracker in self._storage_trackers.values()
) / num_storage_trackers | conditional_block |
backfill.py | from __future__ import annotations
import asyncio
from collections import Counter
from functools import partial
import itertools
import typing
from typing import (
AsyncIterator,
Dict,
Iterable,
NamedTuple,
Optional,
Set,
Tuple,
)
from async_service import Service
from eth.abc import (
AtomicDatabaseAPI,
BlockHeaderAPI,
)
from eth.constants import EMPTY_SHA3
from eth.rlp.accounts import Account
from eth_typing import Hash32
import rlp
from trie import (
HexaryTrie,
exceptions as trie_exceptions,
fog,
)
from trie.constants import (
BLANK_NODE_HASH,
)
from trie.utils.nibbles import (
bytes_to_nibbles,
)
from trie.utils.nodes import (
key_starts_with,
)
from trie.typing import (
HexaryTrieNode,
Nibbles,
)
from p2p.exceptions import BaseP2PError, PeerConnectionLost
from trinity.protocol.eth.peer import ETHPeer, ETHPeerPool
from trinity.sync.beam.constants import (
EPOCH_BLOCK_LENGTH,
GAP_BETWEEN_TESTS,
NON_IDEAL_RESPONSE_PENALTY,
PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED,
)
from trinity._utils.async_iter import async_take
from trinity._utils.logging import get_logger
from trinity._utils.timer import Timer
from .queen import (
QueeningQueue,
QueenTrackerAPI,
)
REQUEST_SIZE = 16
class BeamStateBackfill(Service, QueenTrackerAPI):
"""
Use a very simple strategy to fill in state in the background.
Ask each peer in sequence for some nodes, ignoring the lowest RTT node.
Reduce memory pressure by using a depth-first strategy.
An intended side-effect is to build & maintain an accurate measurement of
the round-trip-time that peers take to respond to GetNodeData commands.
"""
_total_added_nodes = 0
_num_added = 0
_num_missed = 0
_num_accounts_completed = 0
_num_storage_completed = 0
_report_interval = 10
_num_requests_by_peer: typing.Counter[ETHPeer]
def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool) -> None:
self.logger = get_logger('trinity.sync.beam.backfill.BeamStateBackfill')
self._db = db
self._peer_pool = peer_pool
self._is_missing: Set[Hash32] = set()
self._num_requests_by_peer = Counter()
self._queening_queue = QueeningQueue(peer_pool)
# Track the nodes that we are requesting in the account trie
self._account_tracker = TrieNodeRequestTracker()
self._storage_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
self._bytecode_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
# The most recent root hash to use to navigate the trie
self._next_trie_root_hash: Optional[Hash32] = None
self._begin_backfill = asyncio.Event()
async def get_queen_peer(self) -> ETHPeer:
return await self._queening_queue.get_queen_peer()
def penalize_queen(self, peer: ETHPeer, delay: float = NON_IDEAL_RESPONSE_PENALTY) -> None:
self._queening_queue.penalize_queen(peer, delay=delay)
async def run(self) -> None:
self.manager.run_daemon_task(self._periodically_report_progress)
queening_manager = self.manager.run_daemon_child_service(self._queening_queue)
await queening_manager.wait_started()
await self._run_backfill()
self.manager.cancel()
async def _run_backfill(self) -> None:
await self._begin_backfill.wait()
if self._next_trie_root_hash is None:
raise RuntimeError("Cannot start backfill when a recent trie root hash is unknown")
while self.manager.is_running:
peer = await self._queening_queue.pop_fastest_peasant()
# collect node hashes that might be missing
required_data = tuple([
request async for request in async_take(REQUEST_SIZE, self._missing_trie_hashes())
])
if len(required_data) == 0:
# Nothing available to request, for one of two reasons:
if self._check_complete():
self.logger.info("Downloaded all accounts, storage and bytecode state")
return
else:
# There are active requests to peers, and we don't have enough information to
# ask for any more trie nodes (for example, near the beginning, when the top
# of the trie isn't available).
self._queening_queue.readd_peasant(peer)
self.logger.debug(
"Backfill is waiting for more hashes to arrive, putting %s back in queue",
peer,
)
await asyncio.sleep(PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED)
continue
self.manager.run_task(self._make_request, peer, required_data)
def _check_complete(self) -> bool:
if self._account_tracker.is_complete:
storage_complete = all(
storage_tracker.is_complete
for storage_tracker in self._storage_trackers.values()
)
if storage_complete:
bytecode_complete = all(
bytecode_tracker.is_complete
for bytecode_tracker in self._bytecode_trackers.values()
)
# All backfill is complete only if the account and storage and bytecodes are present
return bytecode_complete
else:
# At least one account is missing a storage trie node
return False
else:
# At least one account trie node is missing
return False
async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:
"""
Walks through the full state trie, yielding one missing node hash/prefix
at a time.
The yielded node info is wrapped in a TrackedRequest. The hash is
marked as active until it is explicitly marked for review again. The
hash/prefix will be marked for review asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
# For each account, when we have asked for all known storage and bytecode
# hashes, but some are still not present, we "pause" the account so we can look
# for neighboring nodes.
# This is a list of paused accounts, using the path to the leaf node,
# because that's how the account tracker is indexed.
exhausted_account_leaves: Tuple[Nibbles, ...] = ()
starting_root_hash = self._next_trie_root_hash
try:
while self.manager.is_running:
# Get the next account
# We have to rebuild the account iterator every time because...
# something about an exception during a manual __anext__()?
account_iterator = self._request_tracking_trie_items(
self._account_tracker,
starting_root_hash,
)
try:
next_account_info = await account_iterator.__anext__()
except trie_exceptions.MissingTraversalNode as exc:
# Found a missing trie node while looking for the next account
yield self._account_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
continue
except StopAsyncIteration:
# Finished iterating over all available accounts
break
# Decode account
path_to_leaf, address_hash_nibbles, encoded_account = next_account_info
account = rlp.decode(encoded_account, sedes=Account)
# Iterate over all missing hashes of subcomponents (storage & bytecode)
subcomponent_hashes_iterator = self._missing_subcomponent_hashes(
address_hash_nibbles,
account,
starting_root_hash,
)
async for node_request in subcomponent_hashes_iterator:
yield node_request
# Check if account is fully downloaded
account_components_complete = self._are_account_components_complete(
address_hash_nibbles,
account,
)
if account_components_complete:
# Mark fully downloaded accounts as complete, and do some cleanup
self._mark_account_complete(path_to_leaf, address_hash_nibbles)
else:
# Pause accounts that are not fully downloaded, and track the account
# to resume when the generator exits.
self._account_tracker.pause_review(path_to_leaf)
exhausted_account_leaves += (path_to_leaf, )
except GeneratorExit:
# As the generator is exiting, we want to resume any paused accounts. This
# allows us to find missing storage/bytecode on the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
raise
else:
# If we pause a few accounts and then run out of nodes to ask for, then we
# still need to resume the paused accounts to prepare for the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
# Possible scenarios:
# 1. We have completed backfill
# 2. We have iterated the available nodes, and all known hashes are being requested.
# For example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes, and exit cleanly.
#
# In response to these situations, we might like to:
# 1. Log and celebrate that the full state has been downloaded
# 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it, using a _check_complete() check.
return
async def _request_tracking_trie_items(
self,
request_tracker: TrieNodeRequestTracker,
root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:
"""
Walk through the supplied trie, yielding the request tracker and node
request for any missing trie nodes.
:yield: path to leaf node, a key (as nibbles), and the value found in the trie
:raise: MissingTraversalNode if a node is missing while walking the trie
"""
if self._next_trie_root_hash is None:
# We haven't started beam syncing, so don't know which root to start at
return
trie = HexaryTrie(self._db, root_hash)
starting_index = bytes_to_nibbles(root_hash)
while self.manager.is_running:
try:
path_to_node = request_tracker.next_path_to_explore(starting_index)
except trie_exceptions.PerfectVisibility:
# This doesn't necessarily mean we are finished.
# Any active prefixes might still be hiding some significant portion of the trie
# But it's all we're able to explore for now, until more node data arrives
return
try:
cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)
except KeyError:
cached_node = None
node_getter = partial(trie.traverse, path_to_node)
else:
node_getter = partial(trie.traverse_from, cached_node, uncached_key)
try:
node = node_getter()
except trie_exceptions.MissingTraversalNode as exc:
# Found missing account trie node
if path_to_node == exc.nibbles_traversed:
raise
elif cached_node is None:
# The path and nibbles traversed should always match in a non-cached traversal
raise RuntimeError(
f"Unexpected: on a non-cached traversal to {path_to_node}, the"
f" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}"
) from exc
else:
# We need to re-raise a version of the exception that includes the whole path
# from the root node (when using cached nodes, we only have the path from
# the parent node to the child node)
# We could always raise this re-wrapped version, but skipping it (probably?)
# improves performance.
missing_hash = exc.missing_node_hash
raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc
except trie_exceptions.TraversedPartialPath as exc:
node = exc.simulated_node
if node.value:
full_key_nibbles = path_to_node + node.suffix
if len(node.sub_segments):
# It shouldn't be a problem to skip handling this case, because all keys are
# hashed 32 bytes.
raise NotImplementedError(
"The state backfiller doesn't handle keys of different lengths, where"
f" one key is a prefix of another. But found {node} in trie with"
f" {root_hash!r}"
)
yield path_to_node, full_key_nibbles, node.value
# Note that we do not mark value nodes as completed. It is up to the caller
# to do that when it is ready. For example, the storage iterator will
# immediately treat the key as completed. The account iterator will
# not treat the key as completed until all of its storage and bytecode
# are also marked as complete.
else:
# If this is just an intermediate node, then we can mark it as confirmed.
request_tracker.confirm_prefix(path_to_node, node)
async def _missing_subcomponent_hashes(
self,
address_hash_nibbles: Nibbles,
account: Account,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
storage_node_iterator = self._missing_storage_hashes(
address_hash_nibbles,
account.storage_root,
starting_main_root,
)
async for node_request in storage_node_iterator:
yield node_request
bytecode_node_iterator = self._missing_bytecode_hashes(
address_hash_nibbles,
account.code_hash,
starting_main_root,
)
async for node_request in bytecode_node_iterator:
yield node_request
# Note that completing this iterator does NOT mean we're done with the
# account. It just means that all known missing hashes are actively
# being requested.
async def _missing_storage_hashes(
self,
address_hash_nibbles: Nibbles,
storage_root: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Walks through the storage trie at the given root, yielding one missing
storage node hash/prefix at a time.
The yielded node info is wrapped in a ``TrackedRequest``. The hash is
marked as active until it is explicitly marked for review again. The
hash/prefix will be marked for review asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
if storage_root == BLANK_NODE_HASH:
# Nothing to do if the storage has an empty root
return
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
while self.manager.is_running:
storage_iterator = self._request_tracking_trie_items(
storage_tracker,
storage_root,
)
try:
async for path_to_leaf, hashed_key, _storage_value in storage_iterator:
# We don't actually care to look at the storage keys/values during backfill
storage_tracker.confirm_leaf(path_to_leaf)
except trie_exceptions.MissingTraversalNode as exc:
yield storage_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
else:
# Possible scenarios:
# 1. We have completed backfilling this account's storage
# 2. We have iterated the available nodes, and only their children are missing,
# for example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes.
#
# In response to these situations, we might like to:
# 1. Debug log?
# 2. Look for more missing nodes in neighboring accounts and their storage, etc.
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it.
return
async def _missing_bytecode_hashes(
self,
address_hash_nibbles: Nibbles,
code_hash: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Checks if this bytecode is missing. If so, yield it and then exit.
If not, then exit immediately.
This may seem like overkill, and it is right now. But...
Code merkelization is coming (theoretically), and the other account
and storage trie iterators work similarly to this, so in some ways
it's easier to do this "over-generalized" solution now. It makes
request tracking a bit easier too, to have the same TrackedRequest
result mechanism.
"""
if code_hash == EMPTY_SHA3:
# Nothing to do if the bytecode is for the empty hash
return
bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
if bytecode_tracker.is_complete:
# All bytecode has been collected
return
# If there is an active request (for now, there can only be one), then skip
# any database checks until the active request is resolved.
if not bytecode_tracker.has_active_requests:
if code_hash not in self._db:
# The bytecode isn't present, so we ask for it.
# A bit hacky here, since there is no trie, we just treat it as
# if it were a leaf node at the root.
yield bytecode_tracker.generate_request(code_hash, prefix=())
else:
# The bytecode is already present, but the tracker isn't marked
# as completed yet, so finish it off.
bytecode_tracker.confirm_leaf(path_to_leaf=())
def _get_storage_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._storage_trackers:
return self._storage_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._storage_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _get_bytecode_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._bytecode_trackers:
return self._bytecode_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._bytecode_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _mark_account_complete(self, path_to_leaf: Nibbles, address_hash_nibbles: Nibbles) -> None:
self._account_tracker.confirm_leaf(path_to_leaf)
self._num_accounts_completed += 1
# Clear the storage tracker, to reduce memory usage
# and the time to check self._check_complete()
if address_hash_nibbles in self._storage_trackers:
self._num_storage_completed += 1
del self._storage_trackers[address_hash_nibbles]
# Clear the bytecode tracker, for the same reason
if address_hash_nibbles in self._bytecode_trackers:
del self._bytecode_trackers[address_hash_nibbles]
def _are_account_components_complete(
self,
address_hash_nibbles: Nibbles,
account: Account) -> bool:
if account.storage_root != BLANK_NODE_HASH:
# Avoid generating a storage tracker if there is no storage for this account
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
if account.storage_root == BLANK_NODE_HASH or storage_tracker.is_complete:
if account.code_hash == EMPTY_SHA3:
# All storage is downloaded, and no bytecode to download
return True
else:
bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
# All storage is downloaded, return True only if bytecode is downloaded
return bytecode_tracker.is_complete
else:
# Missing some storage
return False
async def _make_request(
self,
peer: ETHPeer,
request_data: Iterable[TrackedRequest]) -> None:
self._num_requests_by_peer[peer] += 1
request_hashes = tuple(set(request.node_hash for request in request_data))
try:
nodes = await peer.eth_api.get_node_data(request_hashes)
except asyncio.TimeoutError:
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
except PeerConnectionLost:
# Something unhappy, but we don't really care, peer will be gone by next loop
pass
except (BaseP2PError, Exception) as exc:
self.logger.info("Unexpected err while getting background nodes from %s: %s", peer, exc)
self.logger.debug("Problem downloading background nodes from peer...", exc_info=True)
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
else:
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS)
self._insert_results(request_hashes, nodes)
finally:
for request in request_data:
request.tracker.mark_for_review(request.prefix)
def _insert_results(
self,
requested_hashes: Tuple[Hash32, ...],
nodes: Tuple[Tuple[Hash32, bytes], ...]) -> None:
returned_nodes = dict(nodes)
with self._db.atomic_batch() as write_batch:
for requested_hash in requested_hashes:
if requested_hash in returned_nodes:
self._num_added += 1
self._total_added_nodes += 1
encoded_node = returned_nodes[requested_hash]
write_batch[requested_hash] = encoded_node
else:
self._num_missed += 1
def set_root_hash(self, header: BlockHeaderAPI, root_hash: Hash32) -> None:
if self._next_trie_root_hash is None:
self._next_trie_root_hash = root_hash
self._begin_backfill.set()
elif header.block_number % EPOCH_BLOCK_LENGTH == 1:
# This is the root hash of the *parent* of the header, so use modulus equals 1
self._next_trie_root_hash = root_hash
async def _periodically_report_progress(self) -> None:
for step in itertools.count():
if not self.manager.is_running:
break
self._num_added = 0
self._num_missed = 0
timer = Timer()
await asyncio.sleep(self._report_interval)
if not self._begin_backfill.is_set():
self.logger.debug("Beam-Backfill: waiting for new state root")
continue
msg = "total=%d" % self._total_added_nodes
msg += " new=%d" % self._num_added
msg += " miss=%d" % self._num_missed
self.logger.debug("Beam-Backfill: %s", msg)
# log peer counts
show_top_n_peers = 3
self.logger.debug(
"Beam-Backfill-Peer-Usage-Top-%d: %s",
show_top_n_peers,
self._num_requests_by_peer.most_common(show_top_n_peers),
)
# For now, report every 30s (1/3 as often as the debug report above)
if step % 3 == 0:
num_storage_trackers = len(self._storage_trackers)
if num_storage_trackers:
active_storage_completion = sum(
self._complete_trie_fraction(store_tracker)
for store_tracker in self._storage_trackers.values()
) / num_storage_trackers
else:
active_storage_completion = 0
# Log backfill state stats as a progress indicator to the user:
# - nodes: the total number of nodes collected during this backfill session
# - accts: number of accounts completed, including all storage and bytecode,
# if present. This includes accounts downloaded and ones already present.
# - prog: the progress to completion, measured as a percentage of accounts
# completed, using trie structure. Ignores imbalances caused by storage.
# - stores: number of non-trivial complete storages downloaded
# - storing: the percentage complete and number of storage tries being
# downloaded actively
# - walked: the part of the account trie walked from this
# epoch's index, as parts per million (a fraction of the
# total account trie)
# - tnps: trie nodes collected per second, since the last debug log (in the
# last 10 seconds, at comment time)
num_requests = sum(self._num_requests_by_peer.values())
if num_requests == 0:
log = self.logger.debug
else:
log = self.logger.info
log(
(
"State Stats: nodes=%d accts=%d prog=%.2f%% stores=%d"
" storing=%.1f%% of %d walked=%.1fppm tnps=%.0f req=%d"
),
self._total_added_nodes,
self._num_accounts_completed,
self._complete_trie_fraction(self._account_tracker) * 100,
self._num_storage_completed,
active_storage_completion * 100,
num_storage_trackers,
self._contiguous_accounts_complete_fraction() * 1e6,
self._num_added / timer.elapsed,
num_requests,
)
self._num_requests_by_peer.clear()
def _complete_trie_fraction(self, tracker: TrieNodeRequestTracker) -> float:
"""
Calculate stats for logging: estimate what percent of the trie is completed,
by looking at unexplored prefixes in the account trie.
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion
One awkward thing: there will be no apparent progress while filling in
the storage of a single large account. Progress is slow enough anyway
that this is probably immaterial.
"""
# Move this logic into HexaryTrieFog someday
unknown_prefixes = tracker._trie_fog._unexplored_prefixes
# Basic estimation logic:
# - An unknown prefix 0xf means that we are missing 1/16 of the trie
# - An unknown prefix 0x12 means that we are missing 1/(16^2) of the trie
# - Add up all the unknown prefixes to estimate the total collected fraction.
unknown_fraction = sum(
(1 / 16) ** len(prefix)
for prefix in unknown_prefixes
)
return 1 - unknown_fraction
def _contiguous_accounts_complete_fraction(self) -> float:
"""
Estimate the completed fraction of the trie that is contiguous with
the current index (which rotates every 32 blocks)
It will be probably be quite noticeable that it will get "stuck" when
downloading a lot of storage, because we'll have to blow it up to more
than a percentage to see any significant change within 32 blocks. (when
the index will change again anyway)
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion contiguous with the current backfill index key
"""
starting_index = bytes_to_nibbles(self._next_trie_root_hash)
unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes
if len(unknown_prefixes) == 0:
return 1
# find the nearest unknown prefix (typically, on the right)
nearest_index = unknown_prefixes.bisect(starting_index)
# Get the nearest unknown prefix to the left
if nearest_index == 0:
left_prefix = (0, ) * 64
else:
left_prefix = unknown_prefixes[nearest_index - 1]
if key_starts_with(starting_index, left_prefix):
# The prefix of the starting index is unknown, so the index
# itself is unknown.
return 0
# Get the nearest unknown prefix to the right
if len(unknown_prefixes) == nearest_index:
right_prefix = (0xf, ) * 64
else:
right_prefix = unknown_prefixes[nearest_index]
# Use the space between the unknown prefixes to estimate the completed contiguous fraction
# At the base, every gap in the first nibble is a full 1/16th of the state complete
known_first_nibbles = right_prefix[0] - left_prefix[0] - 1
completed_fraction_base = (1 / 16) * known_first_nibbles
# Underneath, you can count completed subtrees on the right, each child 1/16 of the parent
right_side_completed = sum(
nibble * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(right_prefix[1:], 2)
)
# Do the same on the left
left_side_completed = sum(
(0xf - nibble) * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(left_prefix[1:], 2)
)
# Add up all completed areas
return left_side_completed + completed_fraction_base + right_side_completed
class TrieNodeRequestTracker:
|
class TrackedRequest(NamedTuple):
tracker: TrieNodeRequestTracker
node_hash: Hash32
prefix: Nibbles
| def __init__(self) -> None:
self._trie_fog = fog.HexaryTrieFog()
self._active_prefixes: Set[Nibbles] = set()
# cache of nodes used to speed up trie walking
self._node_frontier_cache = fog.TrieFrontierCache()
def mark_for_review(self, prefix: Nibbles) -> None:
# Calling this does not mean that the nodes were returned, only that they are eligible again
# for review (either they were returned or we can ask a different peer for them)
self._active_prefixes.remove(prefix)
def pause_review(self, prefix: Nibbles) -> None:
"""
Stop iterating this node, until mark_for_review() is called
"""
self._active_prefixes.add(prefix)
def _get_eligible_fog(self) -> fog.HexaryTrieFog:
"""
Return the Trie Fog that can be searched, ignoring any nodes that are currently
being requested.
"""
return self._trie_fog.mark_all_complete(self._active_prefixes)
def next_path_to_explore(self, starting_index: Nibbles) -> Nibbles:
return self._get_eligible_fog().nearest_unknown(starting_index)
def confirm_prefix(
self,
confirmed_prefix: Nibbles,
node: fog.HexaryTrieFog) -> None:
if node.sub_segments:
# No nodes have both value and sub_segments, so we can wait to update the cache
self.add_cache(confirmed_prefix, node, node.sub_segments)
elif node.value:
# If we are confirming a leaf, use confirm_leaf(). We do not attempt to handle a
# situation where one key is a prefix of another key, and simply error out.
raise ValueError("Do not handle case where prefix of another key has a value")
else:
# We don't have to look up this node anymore, so can delete it from our cache
self.delete_cache(confirmed_prefix)
self._trie_fog = self._trie_fog.explore(confirmed_prefix, node.sub_segments)
def confirm_leaf(self, path_to_leaf: Nibbles) -> None:
# We don't handle keys that are subkeys of other keys (because
# all keys are 32 bytes), so we can just hard-code that there
# are no children of this address.
self.delete_cache(path_to_leaf)
self._trie_fog = self._trie_fog.explore(path_to_leaf, ())
def generate_request(
self,
node_hash: Hash32,
prefix: Nibbles) -> TrackedRequest:
self.pause_review(prefix)
return TrackedRequest(self, node_hash, prefix)
@property
def has_active_requests(self) -> bool:
return len(self._active_prefixes) > 0
def get_cached_parent(self, prefix: Nibbles) -> Tuple[HexaryTrieNode, Nibbles]:
return self._node_frontier_cache.get(prefix)
def add_cache(
self,
prefix: Nibbles,
node: HexaryTrieNode,
sub_segments: Iterable[Nibbles]) -> None:
self._node_frontier_cache.add(prefix, node, sub_segments)
def delete_cache(self, prefix: Nibbles) -> None:
self._node_frontier_cache.delete(prefix)
@property
def is_complete(self) -> bool:
return self._trie_fog.is_complete
def __repr__(self) -> str:
return (
f"TrieNodeRequestTracker(trie_fog={self._trie_fog!r},"
f" active_prefixes={self._active_prefixes!r})"
) | identifier_body |
copy_up.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fs
import (
"fmt"
"io"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
// copyUp copies a file in an overlay from a lower filesystem to an
// upper filesytem so that the file can be modified in the upper
// filesystem. Copying a file involves several steps:
//
// - All parent directories of the file are created in the upper
// filesystem if they don't exist there. For instance:
//
// upper /dir0
// lower /dir0/dir1/file
//
// copyUp of /dir0/dir1/file creates /dir0/dir1 in order to create
// /dir0/dir1/file.
//
// - The file content is copied from the lower file to the upper
// file. For symlinks this is the symlink target. For directories,
// upper directory entries are merged with lower directory entries
// so there is no need to copy any entries.
//
// - A subset of file attributes of the lower file are set on the
// upper file. These are the file owner, the file timestamps,
// and all non-overlay extended attributes. copyUp will fail if
// the upper filesystem does not support the setting of these
// attributes.
//
// The file's permissions are set when the file is created and its
// size will be brought up to date when its contents are copied.
// Notably no attempt is made to bring link count up to date because
// hard links are currently not preserved across overlay filesystems.
//
// - Memory mappings of the lower file are invalidated and memory
// references are transferred to the upper file. From this point on,
// memory mappings of the file will be backed by content in the upper
// filesystem.
//
// Synchronization:
//
// copyUp synchronizes with rename(2) using renameMu to ensure that
// parentage does not change while a file is being copied. In the context
// of rename(2), copyUpLockedForRename should be used to avoid deadlock on
// renameMu.
//
// The following operations synchronize with copyUp using copyMu:
//
// - InodeOperations, i.e. to ensure that looking up a directory takes
// into account new upper filesystem directories created by copy up,
// which subsequently can be modified.
//
// - FileOperations, i.e. to ensure that reading from a file does not
// continue using a stale, lower filesystem handle when the file is
// written to.
//
// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu.
//
// Caveats:
//
// If any step in copying up a file fails, copyUp cleans the upper
// filesystem of any partially up-to-date file. If this cleanup fails,
// the overlay may be in an unacceptable, inconsistent state, so copyUp
// panics. If copyUp fails because any step (above) fails, a generic
// error is returned.
//
// copyUp currently makes no attempt to optimize copying up file content.
// For large files, this means that copyUp blocks until the entire file
// is copied synchronously.
func copyUp(ctx context.Context, d *Dirent) error {
renameMu.RLock()
defer renameMu.RUnlock()
return copyUpLockedForRename(ctx, d)
}
// copyUpLockedForRename is the same as copyUp except that it does not lock
// renameMu.
//
// It copies each component of d that does not yet exist in the upper
// filesystem. If d already exists in the upper filesystem, it is a no-op.
//
// Any error returned indicates a failure to copy all of d. This may
// leave the upper filesystem filled with any number of parent directories
// but the upper filesystem will never be in an inconsistent state.
//
// Preconditions:
// - d.Inode.overlay is non-nil.
func copyUpLockedForRename(ctx context.Context, d *Dirent) error {
for {
// Did we race with another copy up or does there
// already exist something in the upper filesystem
// for d?
d.Inode.overlay.copyMu.RLock()
if d.Inode.overlay.upper != nil {
d.Inode.overlay.copyMu.RUnlock()
// Done, d is in the upper filesystem.
return nil
}
d.Inode.overlay.copyMu.RUnlock()
// Find the next component to copy up. We will work our way
// down to the last component of d and finally copy it.
next := findNextCopyUp(ctx, d)
// Attempt to copy.
if err := doCopyUp(ctx, next); err != nil {
return err
}
}
}
// findNextCopyUp finds the next component of d from root that does not
// yet exist in the upper filesystem. The parent of this component is
// also returned, which is the root of the overlay in the worst case.
func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent {
next := d
for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ {
// Does this parent have a non-nil upper Inode?
parent.Inode.overlay.copyMu.RLock()
if parent.Inode.overlay.upper != nil {
parent.Inode.overlay.copyMu.RUnlock()
// Note that since we found an upper, it is stable.
return next
}
parent.Inode.overlay.copyMu.RUnlock()
// Continue searching for a parent with a non-nil
// upper Inode.
next = parent
parent = next.parent
}
}
func doCopyUp(ctx context.Context, d *Dirent) error {
// Fail fast on Inode types we won't be able to copy up anyways. These
// Inodes may block in GetFile while holding copyMu for reading. If we
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
return syserror.EINVAL
}
// Wait to get exclusive access to the upper Inode.
d.Inode.overlay.copyMu.Lock()
defer d.Inode.overlay.copyMu.Unlock()
if d.Inode.overlay.upper != nil {
// We raced with another doCopyUp, no problem.
return nil
}
// Perform the copy.
return copyUpLocked(ctx, d.parent, d)
}
// copyUpLocked creates a copy of next in the upper filesystem of parent.
//
// copyUpLocked must be called with d.Inode.overlay.copyMu locked.
//
// Returns a generic error on failure.
//
// Preconditions:
// - parent.Inode.overlay.upper must be non-nil.
// - next.Inode.overlay.copyMu must be locked writable.
// - next.Inode.overlay.lower must be non-nil.
// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
// - upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
return syserror.EIO
}
var childUpperInode *Inode
parentUpper := parent.Inode.overlay.upper
root := RootFromContext(ctx)
if root != nil {
defer root.DecRef()
}
// Create the file in the upper filesystem and get an Inode for it.
switch next.Inode.StableAttr.Type {
case RegularFile:
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
return syserror.EIO
}
defer childFile.DecRef()
childUpperInode = childFile.Dirent.Inode
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
case Symlink:
childLower := next.Inode.overlay.lower
link, err := childLower.Readlink(ctx)
if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
return syserror.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
default:
panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next))
}
// Bring file attributes up to date. This does not include size, which will be
// brought up to date with copyContentsLocked.
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
log.Warningf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
log.Warningf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
upperMappable := childUpperInode.Mappable()
if lowerMappable != nil && upperMappable == nil {
log.Warningf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Propagate memory mappings to the upper Inode.
next.Inode.overlay.mapsMu.Lock()
defer next.Inode.overlay.mapsMu.Unlock()
if upperMappable != nil |
// Take a reference on the upper Inode (transferred to
// next.Inode.overlay.upper) and make new translations use it.
next.Inode.overlay.dataMu.Lock()
childUpperInode.IncRef()
next.Inode.overlay.upper = childUpperInode
next.Inode.overlay.dataMu.Unlock()
// Invalidate existing translations through the lower Inode.
next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
// Remove existing memory mappings from the lower Inode.
if lowerMappable != nil {
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
for m := range seg.Value() {
lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
}
}
return nil
}
// cleanupUpper removes name from parent, and panics if it is unsuccessful.
func cleanupUpper(ctx context.Context, parent *Inode, name string) {
if err := parent.InodeOperations.Remove(ctx, parent, name); err != nil {
// Unfortunately we don't have much choice. We shouldn't
// willingly give the caller access to a nonsense filesystem.
panic(fmt.Sprintf("overlay filesystem is in an inconsistent state: failed to remove %q from upper filesystem: %v", name, err))
}
}
// copyUpBuffers is a buffer pool for copying file content. The buffer
// size is the same used by io.Copy.
var copyUpBuffers = sync.Pool{New: func() interface{} { return make([]byte, 8*usermem.PageSize) }}
// copyContentsLocked copies the contents of lower to upper. It panics if
// less than size bytes can be copied.
func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size int64) error {
// We don't support copying up for anything other than regular files.
if lower.StableAttr.Type != RegularFile {
return nil
}
// Get a handle to the upper filesystem, which we will write to.
upperFile, err := overlayFile(ctx, upper, FileFlags{Write: true})
if err != nil {
return err
}
defer upperFile.DecRef()
// Get a handle to the lower filesystem, which we will read from.
lowerFile, err := overlayFile(ctx, lower, FileFlags{Read: true})
if err != nil {
return err
}
defer lowerFile.DecRef()
// Use a buffer pool to minimize allocations.
buf := copyUpBuffers.Get().([]byte)
defer copyUpBuffers.Put(buf)
// Transfer the contents.
//
// One might be able to optimize this by doing parallel reads, parallel writes and reads, larger
// buffers, etc. But we really don't know anything about the underlying implementation, so these
// optimizations could be self-defeating. So we leave this as simple as possible.
var offset int64
for {
nr, err := lowerFile.FileOperations.Read(ctx, lowerFile, usermem.BytesIOSequence(buf), offset)
if err != nil && err != io.EOF {
return err
}
if nr == 0 {
if offset != size {
// Same as in cleanupUpper, we cannot live
// with ourselves if we do anything less.
panic(fmt.Sprintf("filesystem is in an inconsistent state: wrote only %d bytes of %d sized file", offset, size))
}
return nil
}
nw, err := upperFile.FileOperations.Write(ctx, upperFile, usermem.BytesIOSequence(buf[:nr]), offset)
if err != nil {
return err
}
offset += nw
}
}
// copyAttributesLocked copies a subset of lower's attributes to upper,
// specifically owner, timestamps (except of status change time), and
// extended attributes. Notably no attempt is made to copy link count.
// Size and permissions are set on upper when the file content is copied
// and when the file is created respectively.
func copyAttributesLocked(ctx context.Context, upper *Inode, lower *Inode) error {
// Extract attributes from the lower filesystem.
lowerAttr, err := lower.UnstableAttr(ctx)
if err != nil {
return err
}
lowerXattr, err := lower.ListXattr(ctx, linux.XATTR_SIZE_MAX)
if err != nil && err != syserror.EOPNOTSUPP {
return err
}
// Set the attributes on the upper filesystem.
if err := upper.InodeOperations.SetOwner(ctx, upper, lowerAttr.Owner); err != nil {
return err
}
if err := upper.InodeOperations.SetTimestamps(ctx, upper, TimeSpec{
ATime: lowerAttr.AccessTime,
MTime: lowerAttr.ModificationTime,
}); err != nil {
return err
}
for name := range lowerXattr {
// Don't copy-up attributes that configure an overlay in the
// lower.
if isXattrOverlay(name) {
continue
}
value, err := lower.GetXattr(ctx, name, linux.XATTR_SIZE_MAX)
if err != nil {
return err
}
if err := upper.InodeOperations.SetXattr(ctx, upper, name, value, 0 /* flags */); err != nil {
return err
}
}
return nil
}
| {
// Remember which mappings we added so we can remove them on failure.
allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
added := make(memmap.MappingsOfRange)
for m := range seg.Value() {
if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil {
for m := range added {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
for mr, mappings := range allAdded {
for m := range mappings {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable)
}
}
return err
}
added[m] = struct{}{}
}
allAdded[seg.Range()] = added
}
} | conditional_block |
copy_up.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fs
import (
"fmt"
"io"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
// copyUp copies a file in an overlay from a lower filesystem to an
// upper filesytem so that the file can be modified in the upper
// filesystem. Copying a file involves several steps:
//
// - All parent directories of the file are created in the upper
// filesystem if they don't exist there. For instance:
//
// upper /dir0
// lower /dir0/dir1/file
//
// copyUp of /dir0/dir1/file creates /dir0/dir1 in order to create
// /dir0/dir1/file.
//
// - The file content is copied from the lower file to the upper
// file. For symlinks this is the symlink target. For directories,
// upper directory entries are merged with lower directory entries
// so there is no need to copy any entries.
//
// - A subset of file attributes of the lower file are set on the
// upper file. These are the file owner, the file timestamps,
// and all non-overlay extended attributes. copyUp will fail if
// the upper filesystem does not support the setting of these
// attributes.
//
// The file's permissions are set when the file is created and its
// size will be brought up to date when its contents are copied.
// Notably no attempt is made to bring link count up to date because
// hard links are currently not preserved across overlay filesystems.
//
// - Memory mappings of the lower file are invalidated and memory
// references are transferred to the upper file. From this point on,
// memory mappings of the file will be backed by content in the upper
// filesystem.
//
// Synchronization:
//
// copyUp synchronizes with rename(2) using renameMu to ensure that
// parentage does not change while a file is being copied. In the context
// of rename(2), copyUpLockedForRename should be used to avoid deadlock on
// renameMu.
//
// The following operations synchronize with copyUp using copyMu:
//
// - InodeOperations, i.e. to ensure that looking up a directory takes
// into account new upper filesystem directories created by copy up,
// which subsequently can be modified.
//
// - FileOperations, i.e. to ensure that reading from a file does not
// continue using a stale, lower filesystem handle when the file is
// written to.
//
// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu.
//
// Caveats:
//
// If any step in copying up a file fails, copyUp cleans the upper
// filesystem of any partially up-to-date file. If this cleanup fails,
// the overlay may be in an unacceptable, inconsistent state, so copyUp
// panics. If copyUp fails because any step (above) fails, a generic
// error is returned.
//
// copyUp currently makes no attempt to optimize copying up file content.
// For large files, this means that copyUp blocks until the entire file
// is copied synchronously.
func copyUp(ctx context.Context, d *Dirent) error {
renameMu.RLock()
defer renameMu.RUnlock()
return copyUpLockedForRename(ctx, d)
}
// copyUpLockedForRename is the same as copyUp except that it does not lock
// renameMu.
//
// It copies each component of d that does not yet exist in the upper
// filesystem. If d already exists in the upper filesystem, it is a no-op.
//
// Any error returned indicates a failure to copy all of d. This may
// leave the upper filesystem filled with any number of parent directories
// but the upper filesystem will never be in an inconsistent state.
//
// Preconditions:
// - d.Inode.overlay is non-nil.
func copyUpLockedForRename(ctx context.Context, d *Dirent) error {
for {
// Did we race with another copy up or does there
// already exist something in the upper filesystem
// for d?
d.Inode.overlay.copyMu.RLock()
if d.Inode.overlay.upper != nil {
d.Inode.overlay.copyMu.RUnlock()
// Done, d is in the upper filesystem.
return nil
}
d.Inode.overlay.copyMu.RUnlock()
// Find the next component to copy up. We will work our way
// down to the last component of d and finally copy it.
next := findNextCopyUp(ctx, d)
// Attempt to copy.
if err := doCopyUp(ctx, next); err != nil {
return err
}
}
}
// findNextCopyUp finds the next component of d from root that does not
// yet exist in the upper filesystem. The parent of this component is
// also returned, which is the root of the overlay in the worst case.
func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent {
next := d
for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ {
// Does this parent have a non-nil upper Inode?
parent.Inode.overlay.copyMu.RLock()
if parent.Inode.overlay.upper != nil {
parent.Inode.overlay.copyMu.RUnlock()
// Note that since we found an upper, it is stable.
return next
}
parent.Inode.overlay.copyMu.RUnlock()
// Continue searching for a parent with a non-nil
// upper Inode.
next = parent
parent = next.parent
}
}
func doCopyUp(ctx context.Context, d *Dirent) error |
// copyUpLocked creates a copy of next in the upper filesystem of parent.
//
// copyUpLocked must be called with d.Inode.overlay.copyMu locked.
//
// Returns a generic error on failure.
//
// Preconditions:
// - parent.Inode.overlay.upper must be non-nil.
// - next.Inode.overlay.copyMu must be locked writable.
// - next.Inode.overlay.lower must be non-nil.
// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
// - upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
return syserror.EIO
}
var childUpperInode *Inode
parentUpper := parent.Inode.overlay.upper
root := RootFromContext(ctx)
if root != nil {
defer root.DecRef()
}
// Create the file in the upper filesystem and get an Inode for it.
switch next.Inode.StableAttr.Type {
case RegularFile:
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
return syserror.EIO
}
defer childFile.DecRef()
childUpperInode = childFile.Dirent.Inode
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
case Symlink:
childLower := next.Inode.overlay.lower
link, err := childLower.Readlink(ctx)
if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
return syserror.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
default:
panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next))
}
// Bring file attributes up to date. This does not include size, which will be
// brought up to date with copyContentsLocked.
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
log.Warningf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
log.Warningf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
upperMappable := childUpperInode.Mappable()
if lowerMappable != nil && upperMappable == nil {
log.Warningf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Propagate memory mappings to the upper Inode.
next.Inode.overlay.mapsMu.Lock()
defer next.Inode.overlay.mapsMu.Unlock()
if upperMappable != nil {
// Remember which mappings we added so we can remove them on failure.
allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
added := make(memmap.MappingsOfRange)
for m := range seg.Value() {
if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil {
for m := range added {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
for mr, mappings := range allAdded {
for m := range mappings {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable)
}
}
return err
}
added[m] = struct{}{}
}
allAdded[seg.Range()] = added
}
}
// Take a reference on the upper Inode (transferred to
// next.Inode.overlay.upper) and make new translations use it.
next.Inode.overlay.dataMu.Lock()
childUpperInode.IncRef()
next.Inode.overlay.upper = childUpperInode
next.Inode.overlay.dataMu.Unlock()
// Invalidate existing translations through the lower Inode.
next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
// Remove existing memory mappings from the lower Inode.
if lowerMappable != nil {
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
for m := range seg.Value() {
lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
}
}
return nil
}
// cleanupUpper removes name from parent, and panics if it is unsuccessful.
func cleanupUpper(ctx context.Context, parent *Inode, name string) {
if err := parent.InodeOperations.Remove(ctx, parent, name); err != nil {
// Unfortunately we don't have much choice. We shouldn't
// willingly give the caller access to a nonsense filesystem.
panic(fmt.Sprintf("overlay filesystem is in an inconsistent state: failed to remove %q from upper filesystem: %v", name, err))
}
}
// copyUpBuffers is a buffer pool for copying file content. The buffer
// size is the same used by io.Copy.
var copyUpBuffers = sync.Pool{New: func() interface{} { return make([]byte, 8*usermem.PageSize) }}
// copyContentsLocked copies the contents of lower to upper. It panics if
// less than size bytes can be copied.
func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size int64) error {
// We don't support copying up for anything other than regular files.
if lower.StableAttr.Type != RegularFile {
return nil
}
// Get a handle to the upper filesystem, which we will write to.
upperFile, err := overlayFile(ctx, upper, FileFlags{Write: true})
if err != nil {
return err
}
defer upperFile.DecRef()
// Get a handle to the lower filesystem, which we will read from.
lowerFile, err := overlayFile(ctx, lower, FileFlags{Read: true})
if err != nil {
return err
}
defer lowerFile.DecRef()
// Use a buffer pool to minimize allocations.
buf := copyUpBuffers.Get().([]byte)
defer copyUpBuffers.Put(buf)
// Transfer the contents.
//
// One might be able to optimize this by doing parallel reads, parallel writes and reads, larger
// buffers, etc. But we really don't know anything about the underlying implementation, so these
// optimizations could be self-defeating. So we leave this as simple as possible.
var offset int64
for {
nr, err := lowerFile.FileOperations.Read(ctx, lowerFile, usermem.BytesIOSequence(buf), offset)
if err != nil && err != io.EOF {
return err
}
if nr == 0 {
if offset != size {
// Same as in cleanupUpper, we cannot live
// with ourselves if we do anything less.
panic(fmt.Sprintf("filesystem is in an inconsistent state: wrote only %d bytes of %d sized file", offset, size))
}
return nil
}
nw, err := upperFile.FileOperations.Write(ctx, upperFile, usermem.BytesIOSequence(buf[:nr]), offset)
if err != nil {
return err
}
offset += nw
}
}
// copyAttributesLocked copies a subset of lower's attributes to upper,
// specifically owner, timestamps (except of status change time), and
// extended attributes. Notably no attempt is made to copy link count.
// Size and permissions are set on upper when the file content is copied
// and when the file is created respectively.
func copyAttributesLocked(ctx context.Context, upper *Inode, lower *Inode) error {
// Extract attributes from the lower filesystem.
lowerAttr, err := lower.UnstableAttr(ctx)
if err != nil {
return err
}
lowerXattr, err := lower.ListXattr(ctx, linux.XATTR_SIZE_MAX)
if err != nil && err != syserror.EOPNOTSUPP {
return err
}
// Set the attributes on the upper filesystem.
if err := upper.InodeOperations.SetOwner(ctx, upper, lowerAttr.Owner); err != nil {
return err
}
if err := upper.InodeOperations.SetTimestamps(ctx, upper, TimeSpec{
ATime: lowerAttr.AccessTime,
MTime: lowerAttr.ModificationTime,
}); err != nil {
return err
}
for name := range lowerXattr {
// Don't copy-up attributes that configure an overlay in the
// lower.
if isXattrOverlay(name) {
continue
}
value, err := lower.GetXattr(ctx, name, linux.XATTR_SIZE_MAX)
if err != nil {
return err
}
if err := upper.InodeOperations.SetXattr(ctx, upper, name, value, 0 /* flags */); err != nil {
return err
}
}
return nil
}
| {
// Fail fast on Inode types we won't be able to copy up anyways. These
// Inodes may block in GetFile while holding copyMu for reading. If we
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
return syserror.EINVAL
}
// Wait to get exclusive access to the upper Inode.
d.Inode.overlay.copyMu.Lock()
defer d.Inode.overlay.copyMu.Unlock()
if d.Inode.overlay.upper != nil {
// We raced with another doCopyUp, no problem.
return nil
}
// Perform the copy.
return copyUpLocked(ctx, d.parent, d)
} | identifier_body |
copy_up.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fs
import (
"fmt"
"io"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
// copyUp copies a file in an overlay from a lower filesystem to an
// upper filesytem so that the file can be modified in the upper
// filesystem. Copying a file involves several steps:
//
// - All parent directories of the file are created in the upper
// filesystem if they don't exist there. For instance:
//
// upper /dir0
// lower /dir0/dir1/file
//
// copyUp of /dir0/dir1/file creates /dir0/dir1 in order to create
// /dir0/dir1/file.
//
// - The file content is copied from the lower file to the upper
// file. For symlinks this is the symlink target. For directories,
// upper directory entries are merged with lower directory entries
// so there is no need to copy any entries.
//
// - A subset of file attributes of the lower file are set on the
// upper file. These are the file owner, the file timestamps,
// and all non-overlay extended attributes. copyUp will fail if
// the upper filesystem does not support the setting of these
// attributes.
//
// The file's permissions are set when the file is created and its
// size will be brought up to date when its contents are copied.
// Notably no attempt is made to bring link count up to date because
// hard links are currently not preserved across overlay filesystems.
//
// - Memory mappings of the lower file are invalidated and memory
// references are transferred to the upper file. From this point on,
// memory mappings of the file will be backed by content in the upper
// filesystem.
//
// Synchronization:
//
// copyUp synchronizes with rename(2) using renameMu to ensure that
// parentage does not change while a file is being copied. In the context
// of rename(2), copyUpLockedForRename should be used to avoid deadlock on
// renameMu.
//
// The following operations synchronize with copyUp using copyMu:
//
// - InodeOperations, i.e. to ensure that looking up a directory takes
// into account new upper filesystem directories created by copy up,
// which subsequently can be modified.
//
// - FileOperations, i.e. to ensure that reading from a file does not
// continue using a stale, lower filesystem handle when the file is
// written to.
//
// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu.
//
// Caveats:
//
// If any step in copying up a file fails, copyUp cleans the upper
// filesystem of any partially up-to-date file. If this cleanup fails,
// the overlay may be in an unacceptable, inconsistent state, so copyUp
// panics. If copyUp fails because any step (above) fails, a generic
// error is returned.
//
// copyUp currently makes no attempt to optimize copying up file content.
// For large files, this means that copyUp blocks until the entire file
// is copied synchronously.
func copyUp(ctx context.Context, d *Dirent) error {
renameMu.RLock()
defer renameMu.RUnlock()
return copyUpLockedForRename(ctx, d)
}
// copyUpLockedForRename is the same as copyUp except that it does not lock
// renameMu.
//
// It copies each component of d that does not yet exist in the upper
// filesystem. If d already exists in the upper filesystem, it is a no-op.
//
// Any error returned indicates a failure to copy all of d. This may
// leave the upper filesystem filled with any number of parent directories
// but the upper filesystem will never be in an inconsistent state.
//
// Preconditions:
// - d.Inode.overlay is non-nil.
func copyUpLockedForRename(ctx context.Context, d *Dirent) error {
for {
// Did we race with another copy up or does there
// already exist something in the upper filesystem
// for d?
d.Inode.overlay.copyMu.RLock()
if d.Inode.overlay.upper != nil {
d.Inode.overlay.copyMu.RUnlock()
// Done, d is in the upper filesystem.
return nil
}
d.Inode.overlay.copyMu.RUnlock()
// Find the next component to copy up. We will work our way
// down to the last component of d and finally copy it.
next := findNextCopyUp(ctx, d)
// Attempt to copy.
if err := doCopyUp(ctx, next); err != nil {
return err
}
}
}
// findNextCopyUp finds the next component of d from root that does not
// yet exist in the upper filesystem. The parent of this component is
// also returned, which is the root of the overlay in the worst case.
func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent {
next := d
for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ {
// Does this parent have a non-nil upper Inode?
parent.Inode.overlay.copyMu.RLock()
if parent.Inode.overlay.upper != nil {
parent.Inode.overlay.copyMu.RUnlock()
// Note that since we found an upper, it is stable.
return next
}
parent.Inode.overlay.copyMu.RUnlock()
// Continue searching for a parent with a non-nil
// upper Inode.
next = parent
parent = next.parent
}
}
func doCopyUp(ctx context.Context, d *Dirent) error {
// Fail fast on Inode types we won't be able to copy up anyways. These
// Inodes may block in GetFile while holding copyMu for reading. If we
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
return syserror.EINVAL
}
// Wait to get exclusive access to the upper Inode.
d.Inode.overlay.copyMu.Lock()
defer d.Inode.overlay.copyMu.Unlock()
if d.Inode.overlay.upper != nil {
// We raced with another doCopyUp, no problem.
return nil
}
// Perform the copy.
return copyUpLocked(ctx, d.parent, d)
}
// copyUpLocked creates a copy of next in the upper filesystem of parent.
//
// copyUpLocked must be called with d.Inode.overlay.copyMu locked.
//
// Returns a generic error on failure.
//
// Preconditions:
// - parent.Inode.overlay.upper must be non-nil.
// - next.Inode.overlay.copyMu must be locked writable.
// - next.Inode.overlay.lower must be non-nil.
// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
// - upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
return syserror.EIO
}
var childUpperInode *Inode
parentUpper := parent.Inode.overlay.upper
root := RootFromContext(ctx)
if root != nil {
defer root.DecRef()
}
// Create the file in the upper filesystem and get an Inode for it.
switch next.Inode.StableAttr.Type {
case RegularFile:
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
return syserror.EIO
}
defer childFile.DecRef()
childUpperInode = childFile.Dirent.Inode
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
case Symlink:
childLower := next.Inode.overlay.lower | if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
return syserror.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
default:
panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next))
}
// Bring file attributes up to date. This does not include size, which will be
// brought up to date with copyContentsLocked.
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
log.Warningf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
log.Warningf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
upperMappable := childUpperInode.Mappable()
if lowerMappable != nil && upperMappable == nil {
log.Warningf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Propagate memory mappings to the upper Inode.
next.Inode.overlay.mapsMu.Lock()
defer next.Inode.overlay.mapsMu.Unlock()
if upperMappable != nil {
// Remember which mappings we added so we can remove them on failure.
allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
added := make(memmap.MappingsOfRange)
for m := range seg.Value() {
if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil {
for m := range added {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
for mr, mappings := range allAdded {
for m := range mappings {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable)
}
}
return err
}
added[m] = struct{}{}
}
allAdded[seg.Range()] = added
}
}
// Take a reference on the upper Inode (transferred to
// next.Inode.overlay.upper) and make new translations use it.
next.Inode.overlay.dataMu.Lock()
childUpperInode.IncRef()
next.Inode.overlay.upper = childUpperInode
next.Inode.overlay.dataMu.Unlock()
// Invalidate existing translations through the lower Inode.
next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
// Remove existing memory mappings from the lower Inode.
if lowerMappable != nil {
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
for m := range seg.Value() {
lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
}
}
return nil
}
// cleanupUpper removes name from parent, and panics if it is unsuccessful.
func cleanupUpper(ctx context.Context, parent *Inode, name string) {
if err := parent.InodeOperations.Remove(ctx, parent, name); err != nil {
// Unfortunately we don't have much choice. We shouldn't
// willingly give the caller access to a nonsense filesystem.
panic(fmt.Sprintf("overlay filesystem is in an inconsistent state: failed to remove %q from upper filesystem: %v", name, err))
}
}
// copyUpBuffers is a buffer pool for copying file content. The buffer
// size is the same used by io.Copy.
var copyUpBuffers = sync.Pool{New: func() interface{} { return make([]byte, 8*usermem.PageSize) }}
// copyContentsLocked copies the contents of lower to upper. It panics if
// less than size bytes can be copied.
func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size int64) error {
// We don't support copying up for anything other than regular files.
if lower.StableAttr.Type != RegularFile {
return nil
}
// Get a handle to the upper filesystem, which we will write to.
upperFile, err := overlayFile(ctx, upper, FileFlags{Write: true})
if err != nil {
return err
}
defer upperFile.DecRef()
// Get a handle to the lower filesystem, which we will read from.
lowerFile, err := overlayFile(ctx, lower, FileFlags{Read: true})
if err != nil {
return err
}
defer lowerFile.DecRef()
// Use a buffer pool to minimize allocations.
buf := copyUpBuffers.Get().([]byte)
defer copyUpBuffers.Put(buf)
// Transfer the contents.
//
// One might be able to optimize this by doing parallel reads, parallel writes and reads, larger
// buffers, etc. But we really don't know anything about the underlying implementation, so these
// optimizations could be self-defeating. So we leave this as simple as possible.
var offset int64
for {
nr, err := lowerFile.FileOperations.Read(ctx, lowerFile, usermem.BytesIOSequence(buf), offset)
if err != nil && err != io.EOF {
return err
}
if nr == 0 {
if offset != size {
// Same as in cleanupUpper, we cannot live
// with ourselves if we do anything less.
panic(fmt.Sprintf("filesystem is in an inconsistent state: wrote only %d bytes of %d sized file", offset, size))
}
return nil
}
nw, err := upperFile.FileOperations.Write(ctx, upperFile, usermem.BytesIOSequence(buf[:nr]), offset)
if err != nil {
return err
}
offset += nw
}
}
// copyAttributesLocked copies a subset of lower's attributes to upper,
// specifically owner, timestamps (except of status change time), and
// extended attributes. Notably no attempt is made to copy link count.
// Size and permissions are set on upper when the file content is copied
// and when the file is created respectively.
func copyAttributesLocked(ctx context.Context, upper *Inode, lower *Inode) error {
// Extract attributes from the lower filesystem.
lowerAttr, err := lower.UnstableAttr(ctx)
if err != nil {
return err
}
lowerXattr, err := lower.ListXattr(ctx, linux.XATTR_SIZE_MAX)
if err != nil && err != syserror.EOPNOTSUPP {
return err
}
// Set the attributes on the upper filesystem.
if err := upper.InodeOperations.SetOwner(ctx, upper, lowerAttr.Owner); err != nil {
return err
}
if err := upper.InodeOperations.SetTimestamps(ctx, upper, TimeSpec{
ATime: lowerAttr.AccessTime,
MTime: lowerAttr.ModificationTime,
}); err != nil {
return err
}
for name := range lowerXattr {
// Don't copy-up attributes that configure an overlay in the
// lower.
if isXattrOverlay(name) {
continue
}
value, err := lower.GetXattr(ctx, name, linux.XATTR_SIZE_MAX)
if err != nil {
return err
}
if err := upper.InodeOperations.SetXattr(ctx, upper, name, value, 0 /* flags */); err != nil {
return err
}
}
return nil
} | link, err := childLower.Readlink(ctx) | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.